From 8d3abdd26d475e36e5aee394b05fc358ccd2a838 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Wed, 22 Jan 2025 20:41:10 +0000 Subject: [PATCH 01/15] draft --- .golangci.yml | 1 + DEVELOPER.md | 1 + Makefile | 42 +- PROJECT | 2 +- api/v1/suite_test.go | 26 +- api/v1/verticadb_types.go | 16 +- api/v1/verticadb_webhook.go | 19 +- api/v1beta1/eventtrigger_types.go | 8 +- api/v1beta1/eventtrigger_webhook.go | 17 +- api/v1beta1/eventtrigger_webhook_test.go | 33 +- api/v1beta1/verticaautoscaler_types.go | 22 +- api/v1beta1/verticaautoscaler_webhook.go | 17 +- api/v1beta1/verticaautoscaler_webhook_test.go | 24 +- api/v1beta1/verticadb_types.go | 22 +- api/v1beta1/verticareplicator_types.go | 18 +- api/v1beta1/verticareplicator_webhook.go | 17 +- api/v1beta1/verticareplicator_webhook_test.go | 54 +- .../verticarestorepointquery_webhook.go | 17 +- .../verticarestorepointquery_webhook_test.go | 18 +- .../verticarestorepointsquery_types.go | 2 +- api/v1beta1/verticascrutinize_webhook.go | 17 +- api/v1beta1/verticascrutinize_webhook_test.go | 34 +- api/v1beta1/webhook_suite_test.go | 26 +- cmd/operator/main.go | 83 ++- config/default/kustomization.yaml | 51 +- config/default/manager_auth_proxy_patch.yaml | 32 - config/default/manager_metrics_patch.yaml | 4 + config/default/metrics_service.yaml | 17 + config/manager/manager.yaml | 3 + config/manager/operator-envs | 2 + config/prometheus/monitor.yaml | 9 + .../auth_proxy_client_clusterrolebinding.yaml | 15 - config/rbac/auth_proxy_role.yaml | 13 - config/rbac/auth_proxy_role_binding.yaml | 12 - config/rbac/auth_proxy_service.yaml | 16 - config/rbac/kustomization.yaml | 17 +- config/rbac/metrics_auth_role.yaml | 17 + config/rbac/metrics_auth_role_binding.yaml | 12 + ...sterrole.yaml => metrics_reader_role.yaml} | 6 +- config/rbac/vertica-server-role.yaml | 1 + go.mod | 87 ++- go.sum | 569 ++++-------------- helm-charts/verticadb-operator/README.md | 6 +- .../tests/auth-proxy-roles_test.yaml | 25 - .../tests/image-name-and-tag_test.yaml | 6 - .../tests/metric-cert_test.yaml | 44 -- .../tests/metrics-deployment_test.yaml | 8 - .../tests/metrics-service_test.yaml | 32 - .../serviceaccount-rolebinding_test.yaml | 2 - helm-charts/verticadb-operator/values.yaml | 9 +- pkg/builder/builder.go | 4 +- pkg/controllers/et/eventtrigger_controller.go | 13 +- pkg/controllers/et/suite_test.go | 4 +- pkg/controllers/sandbox/sandbox_controller.go | 5 +- pkg/controllers/sandbox/suite_test.go | 8 +- pkg/controllers/vas/suite_test.go | 8 +- .../vas/verticaautoscaler_controller.go | 10 +- .../vdb/imageversion_reconciler.go | 2 +- pkg/controllers/vdb/obj_reconciler_test.go | 2 +- .../vdb/onlineupgrade_reconciler.go | 18 +- pkg/controllers/vdb/suite_test.go | 8 +- pkg/controllers/vdb/verticadb_controller.go | 2 + pkg/controllers/vrep/suite_test.go | 12 +- .../vrep/verticareplicator_controller.go | 6 +- pkg/controllers/vrpq/query_reconciler.go | 2 +- pkg/controllers/vrpq/suite_test.go | 12 +- .../verticarestorepointsquery_controller.go | 6 +- pkg/controllers/vscr/suite_test.go | 12 +- .../vscr/verticascrutinize_controller.go | 13 +- pkg/podfacts/suite_test.go | 8 +- pkg/vdbconfig/suite_test.go | 8 +- pkg/vk8s/suite_test.go | 8 +- scripts/authorize-metrics.sh | 3 - scripts/gen-release-artifacts.sh | 5 +- scripts/template-helm-chart.sh | 35 +- .../metrics-auth-proxy-cert/05-assert.yaml | 1 - .../05-assert.yaml | 1 - .../05-assert.yaml | 1 - .../operator-pod-scheduling/10-assert.yaml | 1 - .../operator-pod-scheduling/20-assert.yaml | 1 - .../from-1.2.0/15-assert.yaml | 1 - 
.../from-1.3.1/15-assert.yaml | 1 - .../from-1.4.0/15-assert.yaml | 1 - .../from-1.6.0/15-assert.yaml | 1 - .../from-1.7.0/15-assert.yaml | 1 - tests/external-images-common-ci.txt | 1 - tests/manifests/rbac/base/rbac.yaml | 1 + 87 files changed, 740 insertions(+), 1037 deletions(-) delete mode 100644 config/default/manager_auth_proxy_patch.yaml create mode 100644 config/default/manager_metrics_patch.yaml create mode 100644 config/default/metrics_service.yaml delete mode 100644 config/rbac/auth_proxy_client_clusterrolebinding.yaml delete mode 100644 config/rbac/auth_proxy_role.yaml delete mode 100644 config/rbac/auth_proxy_role_binding.yaml delete mode 100644 config/rbac/auth_proxy_service.yaml create mode 100644 config/rbac/metrics_auth_role.yaml create mode 100644 config/rbac/metrics_auth_role_binding.yaml rename config/rbac/{auth_proxy_client_clusterrole.yaml => metrics_reader_role.yaml} (66%) delete mode 100644 helm-charts/verticadb-operator/tests/auth-proxy-roles_test.yaml delete mode 100644 helm-charts/verticadb-operator/tests/metric-cert_test.yaml delete mode 100644 helm-charts/verticadb-operator/tests/metrics-service_test.yaml diff --git a/.golangci.yml b/.golangci.yml index 3a25bcaf5..d5ec87b3e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -55,6 +55,7 @@ linters-settings: rules: - name: dot-imports disabled: true + - name: comment-spacings linters: # please, do not use `enable-all`: it's deprecated and will be removed soon. diff --git a/DEVELOPER.md b/DEVELOPER.md index 4339b23c6..9553d9244 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -688,6 +688,7 @@ kubectl logs -c vlogger - --health-probe-bind-address=:8081 - --metrics-bind-address=127.0.0.1:8080 - --leader-elect + - --health-probe-bind-address=:8081 - --enable-profiler command: - /manager diff --git a/Makefile b/Makefile index 6724bd04c..740cd8a96 100644 --- a/Makefile +++ b/Makefile @@ -132,7 +132,7 @@ ifeq ($(USE_IMAGE_DIGESTS), true) endif # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.26.0 +ENVTEST_K8S_VERSION = 1.30.0 # Image URL for the OLM catalog. This is for testing purposes only. ifeq ($(shell $(KIND_CHECK)), 1) @@ -722,11 +722,12 @@ KUBERNETES_SPLIT_YAML ?= $(LOCALBIN)/kubernetes-split-yaml GOLANGCI_LINT = $(LOCALBIN)/golangci-lint ## Tool Versions -KUSTOMIZE_VERSION ?= v4.5.5 -CONTROLLER_TOOLS_VERSION ?= v0.14.0 +KUSTOMIZE_VERSION ?= v5.4.2 +CONTROLLER_TOOLS_VERSION ?= v0.15.0 +ENVTEST_VERSION ?= release-0.18 KIND_VERSION ?= v0.20.0 KUBERNETES_SPLIT_YAML_VERSION ?= v0.3.0 -GOLANGCI_LINT_VER ?= 1.61.0 +GOLANGCI_LINT_VERSION ?= v1.61.0 ## Tool architecture GOOS ?= $(shell go env GOOS) @@ -740,17 +741,17 @@ KUSTOMIZE_DOWNLOAD_URL?=https://github.com/kubernetes-sigs/kustomize/releases/do .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. $(KUSTOMIZE): $(LOCALBIN) - test -s $(KUSTOMIZE) || { curl --retry 10 --retry-max-time 1800 -sL $(KUSTOMIZE_DOWNLOAD_URL) | tar xzf - --directory $(LOCALBIN); } + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) .PHONY: controller-gen controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. 
$(CONTROLLER_GEN): $(LOCALBIN) - test -s $(CONTROLLER_GEN) || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) .PHONY: envtest envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. $(ENVTEST): $(LOCALBIN) - test -s $(ENVTEST) || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20240320141353-395cfc7486e6 + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) .PHONY: kind kind: $(KIND) ## Download kind locally if necessary @@ -765,10 +766,7 @@ $(KUBERNETES_SPLIT_YAML): $(LOCALBIN) .PHONY: golangci-lint $(GOLANGCI_LINT) golangci-lint: $(GOLANGCI_LINT) $(GOLANGCI_LINT): $(LOCALBIN) -ifneq (${GOLANGCI_LINT_VER}, $(shell [ -f $(GOLANGCI_LINT) ] && $(GOLANGCI_LINT) version --format short 2>&1)) - @echo "golangci-lint missing or not version '${GOLANGCI_LINT_VER}', downloading..." - curl --retry 10 --retry-max-time 1800 -sSfL "https://raw.githubusercontent.com/golangci/golangci-lint/v${GOLANGCI_LINT_VER}/install.sh" | sh -s -- -b ./bin "v${GOLANGCI_LINT_VER}" -endif + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) krew: $(HOME)/.krew/bin/kubectl-krew ## Download krew plugin locally if necessary @@ -836,3 +834,25 @@ echo-versions: ## Print the current versions for various components .PHONY: echo-vars echo-vars: echo-images echo-versions ## Print out internal state @echo "DEPLOY_WITH=$(DEPLOY_WITH)" + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default > dist/install.yaml + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary (ideally with version) +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) +endef \ No newline at end of file diff --git a/PROJECT b/PROJECT index 167429938..24c6e0057 100644 --- a/PROJECT +++ b/PROJECT @@ -4,7 +4,7 @@ # More info: https://book.kubebuilder.io/reference/project-config.html domain: vertica.com layout: -- go.kubebuilder.io/v3 +- go.kubebuilder.io/v4 plugins: manifests.sdk.operatorframework.io/v2: {} scorecard.sdk.operatorframework.io/v2: {} diff --git a/api/v1/suite_test.go b/api/v1/suite_test.go index ddfff3032..5c10d70e6 100644 --- a/api/v1/suite_test.go +++ b/api/v1/suite_test.go @@ -29,13 +29,15 @@ import ( . "github.com/onsi/gomega" admissionv1 "k8s.io/api/admission/v1" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -77,7 +79,7 @@ var _ = BeforeSuite(func() { err = admissionv1.AddToScheme(scheme) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) Expect(err).NotTo(HaveOccurred()) @@ -85,20 +87,26 @@ var _ = BeforeSuite(func() { // start webhook server using Manager webhookInstallOptions := &testEnv.WebhookInstallOptions + webhookServer := webhook.NewServer(webhook.Options{ + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + }) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme, - Host: webhookInstallOptions.LocalServingHost, - Port: webhookInstallOptions.LocalServingPort, - CertDir: webhookInstallOptions.LocalServingCertDir, - LeaderElection: false, - MetricsBindAddress: "0", + Scheme: scheme, + LeaderElection: false, + WebhookServer: webhookServer, + Metrics: metricsServerOptions, }) Expect(err).NotTo(HaveOccurred()) err = (&VerticaDB{}).SetupWebhookWithManager(mgr) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:webhook + // +kubebuilder:scaffold:webhook go func() { err = mgr.Start(ctx) diff --git a/api/v1/verticadb_types.go b/api/v1/verticadb_types.go index 6a4322101..afdcee132 100644 --- a/api/v1/verticadb_types.go +++ b/api/v1/verticadb_types.go @@ -182,7 +182,7 @@ type VerticaDBSpec struct { // Contain details about the local storage Local LocalStorage `json:"local"` - //+operator-sdk:csv:customresourcedefinitions:type=spec + // +operator-sdk:csv:customresourcedefinitions:type=spec Subclusters []Subcluster `json:"subclusters"` // +kubebuilder:validation:Optional @@ -1035,12 +1035,12 @@ type VerticaDBPodStatus struct { UpNode bool `json:"upNode"` } -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:storageversion -//+kubebuilder:resource:categories=all;vertica,shortName=vdb -//+kubebuilder:printcolumn:name="Subclusters",type="integer",JSONPath=".status.subclusterCount" -//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:resource:categories=all;vertica,shortName=vdb +// +kubebuilder:printcolumn:name="Subclusters",type="integer",JSONPath=".status.subclusterCount" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +operator-sdk:csv:customresourcedefinitions:resources={{Statefulset,apps/v1,""},{Pod,v1,""},{Service,v1,""}} // VerticaDB is the CR that defines a Vertica Eon mode cluster that is managed by the verticadb-operator. 
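Note on the suite_test.go hunks above: newer controller-runtime releases no longer accept Host, Port, CertDir, or MetricsBindAddress directly on ctrl.Options, so the envtest suites now build the webhook server and the metrics options explicitly. The following is a condensed sketch of the pattern the patch adopts, assuming cfg, scheme, and testEnv come from the surrounding BeforeSuite and that metricsserver and webhook are imported as in the new import block:

    webhookInstallOptions := &testEnv.WebhookInstallOptions
    mgr, err := ctrl.NewManager(cfg, ctrl.Options{
        Scheme:         scheme,
        LeaderElection: false,
        // Webhook serving details moved off ctrl.Options onto a dedicated server.
        WebhookServer: webhook.NewServer(webhook.Options{
            Host:    webhookInstallOptions.LocalServingHost,
            Port:    webhookInstallOptions.LocalServingPort,
            CertDir: webhookInstallOptions.LocalServingCertDir,
        }),
        // BindAddress "0" disables the metrics listener for the test suite,
        // replacing the removed MetricsBindAddress field.
        Metrics: metricsserver.Options{BindAddress: "0"},
    })
    Expect(err).NotTo(HaveOccurred())

The same Options shape reappears later in cmd/operator/main.go, only with real bind addresses instead of the disabled test defaults.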
@@ -1052,7 +1052,7 @@ type VerticaDB struct { Status VerticaDBStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // VerticaDBList contains a list of VerticaDB type VerticaDBList struct { diff --git a/api/v1/verticadb_webhook.go b/api/v1/verticadb_webhook.go index 58968aef9..6b1482d87 100644 --- a/api/v1/verticadb_webhook.go +++ b/api/v1/verticadb_webhook.go @@ -34,6 +34,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) const ( @@ -75,7 +76,7 @@ func (v *VerticaDB) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } -//+kubebuilder:webhook:path=/mutate-vertica-com-v1beta1-verticadb,mutating=true,failurePolicy=fail,sideEffects=None,groups=vertica.com,resources=verticadbs,verbs=create;update,versions=v1beta1,name=mverticadb.kb.io,admissionReviewVersions=v1 +// +kubebuilder:webhook:path=/mutate-vertica-com-v1beta1-verticadb,mutating=true,failurePolicy=fail,sideEffects=None,groups=vertica.com,resources=verticadbs,verbs=create;update,versions=v1beta1,name=mverticadb.kb.io,admissionReviewVersions=v1 var _ webhook.Defaulter = &VerticaDB{} @@ -109,33 +110,33 @@ func (v *VerticaDB) Default() { var _ webhook.Validator = &VerticaDB{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (v *VerticaDB) ValidateCreate() error { +func (v *VerticaDB) ValidateCreate() (admission.Warnings, error) { verticadblog.Info("validate create", "name", v.Name, "GroupVersion", GroupVersion) allErrs := v.validateVerticaDBSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(schema.GroupKind{Group: Group, Kind: VerticaDBKind}, v.Name, allErrs) + return nil, apierrors.NewInvalid(schema.GroupKind{Group: Group, Kind: VerticaDBKind}, v.Name, allErrs) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (v *VerticaDB) ValidateUpdate(old runtime.Object) error { +func (v *VerticaDB) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { verticadblog.Info("validate update", "name", v.Name, "GroupVersion", GroupVersion) allErrs := append(v.validateImmutableFields(old), v.validateVerticaDBSpec()...) 
if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(schema.GroupKind{Group: Group, Kind: VerticaDBKind}, v.Name, allErrs) + return nil, apierrors.NewInvalid(schema.GroupKind{Group: Group, Kind: VerticaDBKind}, v.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (v *VerticaDB) ValidateDelete() error { +func (v *VerticaDB) ValidateDelete() (admission.Warnings, error) { verticadblog.Info("validate delete", "name", v.Name, "GroupVersion", GroupVersion) - return nil + return nil, nil } func (v *VerticaDB) validateImmutableFields(old runtime.Object) field.ErrorList { diff --git a/api/v1beta1/eventtrigger_types.go b/api/v1beta1/eventtrigger_types.go index 6a17bb1ec..5e977a266 100644 --- a/api/v1beta1/eventtrigger_types.go +++ b/api/v1beta1/eventtrigger_types.go @@ -198,9 +198,9 @@ func (r *ETRefObjectStatus) IsSameObject(other *ETRefObjectStatus) bool { return r.APIVersion == other.APIVersion && r.Kind == other.Kind && r.Namespace == other.Namespace && r.Name == other.Name } -//+kubebuilder:object:root=true -//+kubebuilder:resource:categories=vertica,shortName=et -//+kubebuilder:subresource:status +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=vertica,shortName=et +// +kubebuilder:subresource:status // +operator-sdk:csv:customresourcedefinitions:resources={{Job,batch/v1,""}} // EventTrigger is the Schema for the eventtriggers API @@ -212,7 +212,7 @@ type EventTrigger struct { Status EventTriggerStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // EventTriggerList contains a list of EventTrigger type EventTriggerList struct { diff --git a/api/v1beta1/eventtrigger_webhook.go b/api/v1beta1/eventtrigger_webhook.go index f85165156..7f1ebd4bc 100644 --- a/api/v1beta1/eventtrigger_webhook.go +++ b/api/v1beta1/eventtrigger_webhook.go @@ -24,6 +24,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) const ( @@ -50,34 +51,34 @@ func (e *EventTrigger) Default() { var _ webhook.Validator = &EventTrigger{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (e *EventTrigger) ValidateCreate() error { +func (e *EventTrigger) ValidateCreate() (admission.Warnings, error) { eventtriggerlog.Info("validate create", "name", e.Name) allErrs := e.validateSpec() if allErrs == nil { - return nil + return nil, nil } - return apierrors.NewInvalid(GkET, e.Name, allErrs) + return nil, apierrors.NewInvalid(GkET, e.Name, allErrs) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (e *EventTrigger) ValidateUpdate(_ runtime.Object) error { +func (e *EventTrigger) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { eventtriggerlog.Info("validate update", "name", e.Name) allErrs := e.validateSpec() if allErrs == nil { - return nil + return nil, nil } - return apierrors.NewInvalid(GkET, e.Name, allErrs) + return nil, apierrors.NewInvalid(GkET, e.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (e *EventTrigger) ValidateDelete() error { +func (e *EventTrigger) ValidateDelete() (admission.Warnings, error) { eventtriggerlog.Info("validate delete", "name", e.Name) - return nil + return nil, nil } func (e *EventTrigger) validateSpec() field.ErrorList 
{ diff --git a/api/v1beta1/eventtrigger_webhook_test.go b/api/v1beta1/eventtrigger_webhook_test.go index 140e358ec..b61e9b6bf 100644 --- a/api/v1beta1/eventtrigger_webhook_test.go +++ b/api/v1beta1/eventtrigger_webhook_test.go @@ -10,15 +10,19 @@ var _ = Describe("eventtrigger_webhook", func() { // validate VerticaDB spec values It("should succeed with all valid fields", func() { et := MakeET() - Expect(et.ValidateCreate()).Should(Succeed()) - Expect(et.ValidateUpdate(et)).Should(Succeed()) + _, err := et.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = et.ValidateUpdate(et) + Expect(err).Should(Succeed()) }) It("should fail if reference object type is not VerticaDB", func() { et := MakeET() et.Spec.References[0].Object.Kind = "Pod" - Expect(et.ValidateCreate()).ShouldNot(Succeed()) - Expect(et.ValidateUpdate(et)).ShouldNot(Succeed()) + _, err := et.ValidateCreate() + Expect(err).ShouldNot(Succeed()) + _, err1 := et.ValidateUpdate(et) + Expect(err1).ShouldNot(Succeed()) }) It("should fail on multiple reference objects", func() { @@ -34,8 +38,10 @@ var _ = Describe("eventtrigger_webhook", func() { et.Spec.References = append(et.Spec.References, ref) - Expect(et.ValidateCreate()).ShouldNot(Succeed()) - Expect(et.ValidateUpdate(et)).ShouldNot(Succeed()) + _, err := et.ValidateCreate() + Expect(err).ShouldNot(Succeed()) + _, err1 := et.ValidateUpdate(et) + Expect(err1).ShouldNot(Succeed()) }) It("should fail on multiple matches conditions", func() { @@ -48,19 +54,24 @@ var _ = Describe("eventtrigger_webhook", func() { } et.Spec.Matches = append(et.Spec.Matches, match) - Expect(et.ValidateCreate()).ShouldNot(Succeed()) - Expect(et.ValidateUpdate(et)).ShouldNot(Succeed()) + _, err := et.ValidateCreate() + Expect(err).ShouldNot(Succeed()) + _, err1 := et.ValidateUpdate(et) + Expect(err1).ShouldNot(Succeed()) }) It("should fail if job name is not specified", func() { et := MakeET() et.Spec.Template.Metadata.Name = "" et.Spec.Template.Metadata.GenerateName = "" - Expect(et.ValidateCreate()).ShouldNot(Succeed()) + _, err := et.ValidateCreate() + Expect(err).ShouldNot(Succeed()) et.Spec.Template.Metadata.GenerateName = "job1-" - Expect(et.ValidateCreate()).Should(Succeed()) + _, err = et.ValidateCreate() + Expect(err).Should(Succeed()) et.Spec.Template.Metadata.Name = "job1" et.Spec.Template.Metadata.GenerateName = "" - Expect(et.ValidateCreate()).Should(Succeed()) + _, err = et.ValidateCreate() + Expect(err).Should(Succeed()) }) }) diff --git a/api/v1beta1/verticaautoscaler_types.go b/api/v1beta1/verticaautoscaler_types.go index 884bd18cd..4ccca5f84 100644 --- a/api/v1beta1/verticaautoscaler_types.go +++ b/api/v1beta1/verticaautoscaler_types.go @@ -195,16 +195,16 @@ var VasConditionIndexMap = map[VerticaAutoscalerConditionType]int{ ScalingActive: ScalingActiveIndex, } -//+kubebuilder:object:root=true -//+kubebuilder:resource:categories=all;vertica,shortName=vas -//+kubebuilder:subresource:status -//+kubebuilder:subresource:scale:specpath=.spec.targetSize,statuspath=.status.currentSize,selectorpath=.status.selector -//+kubebuilder:printcolumn:name="Granularity",type="string",JSONPath=".spec.scalingGranularity" -//+kubebuilder:printcolumn:name="Current Size",type="integer",JSONPath=".status.currentSize" -//+kubebuilder:printcolumn:name="Target Size",type="integer",JSONPath=".spec.targetSize" -//+kubebuilder:printcolumn:name="Scaling Count",type="integer",JSONPath=".status.scalingCount" -//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" 
-//+operator-sdk:csv:customresourcedefinitions:resources={{VerticaDB,vertica.com/v1beta1,""}} +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=all;vertica,shortName=vas +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.targetSize,statuspath=.status.currentSize,selectorpath=.status.selector +// +kubebuilder:printcolumn:name="Granularity",type="string",JSONPath=".spec.scalingGranularity" +// +kubebuilder:printcolumn:name="Current Size",type="integer",JSONPath=".status.currentSize" +// +kubebuilder:printcolumn:name="Target Size",type="integer",JSONPath=".spec.targetSize" +// +kubebuilder:printcolumn:name="Scaling Count",type="integer",JSONPath=".status.scalingCount" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +operator-sdk:csv:customresourcedefinitions:resources={{VerticaDB,vertica.com/v1beta1,""}} // VerticaAutoscaler is a CR that allows you to autoscale one or more // subclusters in a VerticaDB. @@ -216,7 +216,7 @@ type VerticaAutoscaler struct { Status VerticaAutoscalerStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // VerticaAutoscalerList contains a list of VerticaAutoscaler type VerticaAutoscalerList struct { diff --git a/api/v1beta1/verticaautoscaler_webhook.go b/api/v1beta1/verticaautoscaler_webhook.go index 6daf2631b..34bcdeb87 100644 --- a/api/v1beta1/verticaautoscaler_webhook.go +++ b/api/v1beta1/verticaautoscaler_webhook.go @@ -26,6 +26,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. @@ -45,32 +46,32 @@ func (v *VerticaAutoscaler) Default() { var _ webhook.Validator = &VerticaAutoscaler{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (v *VerticaAutoscaler) ValidateCreate() error { +func (v *VerticaAutoscaler) ValidateCreate() (admission.Warnings, error) { verticaautoscalerlog.Info("validate create", "name", v.Name) allErrs := v.validateSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(schema.GroupKind{Group: Group, Kind: VerticaAutoscalerKind}, v.Name, allErrs) + return nil, apierrors.NewInvalid(schema.GroupKind{Group: Group, Kind: VerticaAutoscalerKind}, v.Name, allErrs) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (v *VerticaAutoscaler) ValidateUpdate(_ runtime.Object) error { +func (v *VerticaAutoscaler) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { verticaautoscalerlog.Info("validate update", "name", v.Name) allErrs := v.validateSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(schema.GroupKind{Group: Group, Kind: VerticaAutoscalerKind}, v.Name, allErrs) + return nil, apierrors.NewInvalid(schema.GroupKind{Group: Group, Kind: VerticaAutoscalerKind}, v.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (v *VerticaAutoscaler) ValidateDelete() error { +func (v *VerticaAutoscaler) ValidateDelete() (admission.Warnings, error) { verticaautoscalerlog.Info("validate delete", "name", v.Name) - return nil + return nil, nil } // validateSpec will validate the current VerticaAutoscaler to see if it is valid diff --git a/api/v1beta1/verticaautoscaler_webhook_test.go 
b/api/v1beta1/verticaautoscaler_webhook_test.go index b5b3828d2..4a879d0af 100644 --- a/api/v1beta1/verticaautoscaler_webhook_test.go +++ b/api/v1beta1/verticaautoscaler_webhook_test.go @@ -23,13 +23,15 @@ import ( var _ = Describe("verticaautoscaler_webhook", func() { It("should succeed with all valid fields", func() { vas := MakeVAS() - Expect(vas.ValidateCreate()).Should(Succeed()) + _, err := vas.ValidateCreate() + Expect(err).Should(Succeed()) }) It("should fail if granularity isn't set properly", func() { vas := MakeVAS() vas.Spec.ScalingGranularity = "BadValue" - Expect(vas.ValidateCreate()).ShouldNot(Succeed()) + _, err := vas.ValidateCreate() + Expect(err).ShouldNot(Succeed()) }) It("should fail if the service name differs", func() { @@ -37,12 +39,16 @@ var _ = Describe("verticaautoscaler_webhook", func() { vas.Spec.ScalingGranularity = SubclusterScalingGranularity vas.Spec.Template.ServiceName = "SomethingElse" vas.Spec.Template.Size = 1 - Expect(vas.ValidateCreate()).ShouldNot(Succeed()) + _, err := vas.ValidateCreate() + Expect(err).ShouldNot(Succeed()) vas.Spec.Template.ServiceName = "" - Expect(vas.ValidateCreate()).ShouldNot(Succeed()) - Expect(vas.ValidateUpdate(MakeVAS())).ShouldNot(Succeed()) + _, err1 := vas.ValidateCreate() + Expect(err1).ShouldNot(Succeed()) + _, err2 := vas.ValidateUpdate(MakeVAS()) + Expect(err2).ShouldNot(Succeed()) vas.Spec.Template.ServiceName = vas.Spec.ServiceName - Expect(vas.ValidateUpdate(MakeVAS())).Should(Succeed()) + _, err3 := vas.ValidateUpdate(MakeVAS()) + Expect(err3).Should(Succeed()) }) It("should fail if you try to use the template with pod scalingGranularity", func() { @@ -50,8 +56,10 @@ var _ = Describe("verticaautoscaler_webhook", func() { vas.Spec.Template.ServiceName = vas.Spec.ServiceName vas.Spec.Template.Size = 1 vas.Spec.ScalingGranularity = PodScalingGranularity - Expect(vas.ValidateCreate()).ShouldNot(Succeed()) + _, err := vas.ValidateCreate() + Expect(err).ShouldNot(Succeed()) vas.Spec.ScalingGranularity = SubclusterScalingGranularity - Expect(vas.ValidateCreate()).Should(Succeed()) + _, err1 := vas.ValidateCreate() + Expect(err1).Should(Succeed()) }) }) diff --git a/api/v1beta1/verticadb_types.go b/api/v1beta1/verticadb_types.go index 1514542f9..46bb02350 100644 --- a/api/v1beta1/verticadb_types.go +++ b/api/v1beta1/verticadb_types.go @@ -198,7 +198,7 @@ type VerticaDBSpec struct { // Contain details about the local storage Local LocalStorage `json:"local"` - //+operator-sdk:csv:customresourcedefinitions:type=spec + // +operator-sdk:csv:customresourcedefinitions:type=spec Subclusters []Subcluster `json:"subclusters"` // +kubebuilder:validation:Optional @@ -1128,16 +1128,16 @@ type VerticaDBPodStatus struct { ReadOnly bool `json:"readOnly"` } -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:resource:categories=all;vertica,shortName=vdb -//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" -//+kubebuilder:printcolumn:name="Subclusters",type="integer",JSONPath=".status.subclusterCount" -//+kubebuilder:printcolumn:name="Installed",type="integer",JSONPath=".status.installCount" -//+kubebuilder:printcolumn:name="DBAdded",type="integer",JSONPath=".status.addedToDBCount" -//+kubebuilder:printcolumn:name="Up",type="integer",JSONPath=".status.upNodeCount" +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:categories=all;vertica,shortName=vdb +// 
+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Subclusters",type="integer",JSONPath=".status.subclusterCount" +// +kubebuilder:printcolumn:name="Installed",type="integer",JSONPath=".status.installCount" +// +kubebuilder:printcolumn:name="DBAdded",type="integer",JSONPath=".status.addedToDBCount" +// +kubebuilder:printcolumn:name="Up",type="integer",JSONPath=".status.upNodeCount" // +operator-sdk:csv:customresourcedefinitions:resources={{Statefulset,apps/v1,""},{Pod,v1,""},{Service,v1,""}} -//+kubebuilder:deprecatedversion:warning="vertica.com/v1beta1 VerticaDB is deprecated, use vertica.com/v1 VerticaDB" +// +kubebuilder:deprecatedversion:warning="vertica.com/v1beta1 VerticaDB is deprecated, use vertica.com/v1 VerticaDB" // VerticaDB is the CR that defines a Vertica Eon mode cluster that is managed by the verticadb-operator. type VerticaDB struct { @@ -1148,7 +1148,7 @@ type VerticaDB struct { Status VerticaDBStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // VerticaDBList contains a list of VerticaDB type VerticaDBList struct { diff --git a/api/v1beta1/verticareplicator_types.go b/api/v1beta1/verticareplicator_types.go index fd936c53a..ceb9bbfb5 100644 --- a/api/v1beta1/verticareplicator_types.go +++ b/api/v1beta1/verticareplicator_types.go @@ -152,14 +152,14 @@ const ( ReplicationModeSync = "sync" ) -//+kubebuilder:object:root=true -//+kubebuilder:resource:path=verticareplicators,singular=verticareplicator,categories=all;vertica,shortName=vrep -//+kubebuilder:subresource:status -//+kubebuilder:printcolumn:name="SourceVerticaDB",type="string",JSONPath=".spec.source.verticaDB" -//+kubebuilder:printcolumn:name="TargetVerticaDB",type="string",JSONPath=".spec.target.verticaDB" -//+kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state" -//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" -//+operator-sdk:csv:customresourcedefinitions:resources={{VerticaDB,vertica.com/v1beta1,""}} +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=verticareplicators,singular=verticareplicator,categories=all;vertica,shortName=vrep +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="SourceVerticaDB",type="string",JSONPath=".spec.source.verticaDB" +// +kubebuilder:printcolumn:name="TargetVerticaDB",type="string",JSONPath=".spec.target.verticaDB" +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +operator-sdk:csv:customresourcedefinitions:resources={{VerticaDB,vertica.com/v1beta1,""}} // VerticaReplicator is the Schema for the verticareplicators API type VerticaReplicator struct { @@ -170,7 +170,7 @@ type VerticaReplicator struct { Status VerticaReplicatorStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // VerticaReplicatorList contains a list of VerticaReplicator type VerticaReplicatorList struct { diff --git a/api/v1beta1/verticareplicator_webhook.go b/api/v1beta1/verticareplicator_webhook.go index 9b01176d2..ffcd7991c 100644 --- a/api/v1beta1/verticareplicator_webhook.go +++ b/api/v1beta1/verticareplicator_webhook.go @@ -25,6 +25,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + 
"sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) var verticareplicatorlog = logf.Log.WithName("verticareplicator-resource") @@ -45,32 +46,32 @@ func (vrep *VerticaReplicator) Default() { var _ webhook.Validator = &VerticaReplicator{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (vrep *VerticaReplicator) ValidateCreate() error { +func (vrep *VerticaReplicator) ValidateCreate() (admission.Warnings, error) { verticareplicatorlog.Info("validate create", "name", vrep.Name) allErrs := vrep.validateVrepSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(GkVR, vrep.Name, allErrs) + return nil, apierrors.NewInvalid(GkVR, vrep.Name, allErrs) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (vrep *VerticaReplicator) ValidateUpdate(_ runtime.Object) error { +func (vrep *VerticaReplicator) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { verticareplicatorlog.Info("validate update", "name", vrep.Name) allErrs := vrep.validateVrepSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(GkVR, vrep.Name, allErrs) + return nil, apierrors.NewInvalid(GkVR, vrep.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (vrep *VerticaReplicator) ValidateDelete() error { +func (vrep *VerticaReplicator) ValidateDelete() (admission.Warnings, error) { verticareplicatorlog.Info("validate delete", "name", vrep.Name) - return nil + return nil, nil } // validateVrepSpec will validate the current VerticaReplicator to see if it is valid diff --git a/api/v1beta1/verticareplicator_webhook_test.go b/api/v1beta1/verticareplicator_webhook_test.go index 9842a6ad0..18def08a4 100644 --- a/api/v1beta1/verticareplicator_webhook_test.go +++ b/api/v1beta1/verticareplicator_webhook_test.go @@ -31,21 +31,25 @@ var _ = Describe("verticascrutinize_webhook", func() { It("should succeed with default async options", func() { vrep := MakeVrep() - Expect(vrep.ValidateCreate()).Should(Succeed()) - Expect(vrep.ValidateUpdate(vrep)).Should(Succeed()) + _, err := vrep.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vrep.ValidateUpdate(vrep) + Expect(err).Should(Succeed()) }) It("should succeed with default sync options", func() { vrep := MakeVrep() vrep.Spec.Mode = ReplicationModeSync - Expect(vrep.ValidateCreate()).Should(Succeed()) - Expect(vrep.ValidateUpdate(vrep)).Should(Succeed()) + _, err := vrep.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vrep.ValidateUpdate(vrep) + Expect(err).Should(Succeed()) }) It("should fail with invalid mode", func() { vrep := MakeVrep() vrep.Spec.Mode = "invalid" - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("Mode must be either 'sync' or 'async'")) }) @@ -53,16 +57,20 @@ var _ = Describe("verticascrutinize_webhook", func() { vrep := MakeVrep() vrep.Spec.Mode = ReplicationModeAsync vrep.Spec.Source.ObjectName = validObjectName - Expect(vrep.ValidateCreate()).Should(Succeed()) - Expect(vrep.ValidateUpdate(vrep)).Should(Succeed()) + _, err := vrep.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vrep.ValidateUpdate(vrep) + Expect(err).Should(Succeed()) }) It("should succeed if valid include pattern is used in async replication mode", func() { vrep := MakeVrep() vrep.Spec.Mode = ReplicationModeAsync vrep.Spec.Source.IncludePattern = validIncludePattern - 
Expect(vrep.ValidateCreate()).Should(Succeed()) - Expect(vrep.ValidateUpdate(vrep)).Should(Succeed()) + _, err := vrep.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vrep.ValidateUpdate(vrep) + Expect(err).Should(Succeed()) }) It("should succeed if valid exclude pattern is used in async replication mode", func() { @@ -70,23 +78,27 @@ var _ = Describe("verticascrutinize_webhook", func() { vrep.Spec.Mode = ReplicationModeAsync vrep.Spec.Source.IncludePattern = validIncludePattern vrep.Spec.Source.ExcludePattern = validExcludePattern - Expect(vrep.ValidateCreate()).Should(Succeed()) - Expect(vrep.ValidateUpdate(vrep)).Should(Succeed()) + _, err := vrep.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vrep.ValidateUpdate(vrep) + Expect(err).Should(Succeed()) }) It("should succeed if valid target namespace is used in async replication mode", func() { vrep := MakeVrep() vrep.Spec.Mode = ReplicationModeAsync vrep.Spec.Target.Namespace = validTargetNamespace - Expect(vrep.ValidateCreate()).Should(Succeed()) - Expect(vrep.ValidateUpdate(vrep)).Should(Succeed()) + _, err := vrep.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vrep.ValidateUpdate(vrep) + Expect(err).Should(Succeed()) }) It("should fail if object name is used in sync replication mode", func() { vrep := MakeVrep() vrep.Spec.Mode = ReplicationModeSync vrep.Spec.Source.ObjectName = validObjectName - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("Object name cannot be used in replication mode 'sync'")) }) @@ -94,7 +106,7 @@ var _ = Describe("verticascrutinize_webhook", func() { vrep := MakeVrep() vrep.Spec.Mode = ReplicationModeSync vrep.Spec.Source.IncludePattern = validIncludePattern - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("Include pattern cannot be used in replication mode 'sync'")) }) @@ -103,7 +115,7 @@ var _ = Describe("verticascrutinize_webhook", func() { vrep.Spec.Mode = ReplicationModeSync vrep.Spec.Source.IncludePattern = validIncludePattern vrep.Spec.Source.ExcludePattern = validExcludePattern - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("Exclude pattern cannot be used in replication mode 'sync'")) }) @@ -111,7 +123,7 @@ var _ = Describe("verticascrutinize_webhook", func() { vrep := MakeVrep() vrep.Spec.Mode = ReplicationModeSync vrep.Spec.Target.Namespace = validTargetNamespace - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("Target namespace cannot be used in replication mode 'sync'")) }) @@ -120,7 +132,7 @@ var _ = Describe("verticascrutinize_webhook", func() { vrep.Spec.Mode = ReplicationModeAsync vrep.Spec.Source.ObjectName = validObjectName vrep.Spec.Source.IncludePattern = validIncludePattern - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("Object name and include pattern cannot be used together")) }) @@ -130,7 +142,7 @@ var _ = Describe("verticascrutinize_webhook", func() { vrep.Spec.Source.ObjectName = validObjectName vrep.Spec.Source.IncludePattern = validIncludePattern vrep.Spec.Source.ExcludePattern = validExcludePattern - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("Object name and exclude pattern cannot be used together")) }) @@ -138,7 +150,7 @@ var _ = Describe("verticascrutinize_webhook", func() { vrep := MakeVrep() 
vrep.Spec.Mode = ReplicationModeAsync vrep.Spec.Source.ExcludePattern = validExcludePattern - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("Exclude pattern cannot be used without include pattern")) }) @@ -148,7 +160,7 @@ var _ = Describe("verticascrutinize_webhook", func() { vmeta.ReplicationTimeoutAnnotation: "10", vmeta.ReplicationPollingFrequencyAnnotation: "0", } - err := vrep.ValidateCreate() + _, err := vrep.ValidateCreate() Expect(err.Error()).To(ContainSubstring("polling frequency cannot be 0 or less than 0")) }) diff --git a/api/v1beta1/verticarestorepointquery_webhook.go b/api/v1beta1/verticarestorepointquery_webhook.go index fb6fdf32c..939813b54 100644 --- a/api/v1beta1/verticarestorepointquery_webhook.go +++ b/api/v1beta1/verticarestorepointquery_webhook.go @@ -23,6 +23,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. @@ -44,32 +45,32 @@ func (vrpq *VerticaRestorePointsQuery) Default() { var _ webhook.Validator = &VerticaRestorePointsQuery{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (vrpq *VerticaRestorePointsQuery) ValidateCreate() error { +func (vrpq *VerticaRestorePointsQuery) ValidateCreate() (admission.Warnings, error) { verticarestorepointsquerylog.Info("validate create", "name", vrpq.Name) allErrs := vrpq.validateVrpqSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(GkVRPQ, vrpq.Name, allErrs) + return nil, apierrors.NewInvalid(GkVRPQ, vrpq.Name, allErrs) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (vrpq *VerticaRestorePointsQuery) ValidateUpdate(_ runtime.Object) error { +func (vrpq *VerticaRestorePointsQuery) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { verticarestorepointsquerylog.Info("validate update", "name", vrpq.Name) allErrs := vrpq.validateVrpqSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(GkVRPQ, vrpq.Name, allErrs) + return nil, apierrors.NewInvalid(GkVRPQ, vrpq.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (vrpq *VerticaRestorePointsQuery) ValidateDelete() error { +func (vrpq *VerticaRestorePointsQuery) ValidateDelete() (admission.Warnings, error) { verticarestorepointsquerylog.Info("validate delete", "name", vrpq.Name) - return nil + return nil, nil } // validateSpec will validate the current VerticaRestorePointsQuery to see if it is valid diff --git a/api/v1beta1/verticarestorepointquery_webhook_test.go b/api/v1beta1/verticarestorepointquery_webhook_test.go index b456daec4..fc1749b00 100644 --- a/api/v1beta1/verticarestorepointquery_webhook_test.go +++ b/api/v1beta1/verticarestorepointquery_webhook_test.go @@ -23,8 +23,10 @@ import ( var _ = Describe("verticarestorepointsquery_webhook", func() { It("should succeed with no filter option fields", func() { vrpq := MakeVrpq() - Expect(vrpq.ValidateCreate()).Should(Succeed()) - Expect(vrpq.ValidateUpdate(vrpq)).Should(Succeed()) + _, err := vrpq.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vrpq.ValidateUpdate(vrpq) + Expect(err).Should(Succeed()) }) It("should succeed with all valid fields", func() { @@ -32,21 +34,23 @@ var _ = 
Describe("verticarestorepointsquery_webhook", func() { vrpq.Spec.FilterOptions.ArchiveName = "db" vrpq.Spec.FilterOptions.StartTimestamp = "2006-01-02 23:59:56" vrpq.Spec.FilterOptions.EndTimestamp = "2006-01-02 23:59:58" - Expect(vrpq.ValidateCreate()).Should(Succeed()) - Expect(vrpq.ValidateUpdate(vrpq)).Should(Succeed()) + _, err := vrpq.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vrpq.ValidateUpdate(vrpq) + Expect(err).Should(Succeed()) }) It("should fail if invalid start timestamp", func() { vrpq := MakeVrpq() vrpq.Spec.FilterOptions.StartTimestamp = "start" - err := vrpq.ValidateCreate() + _, err := vrpq.ValidateCreate() Expect(err.Error()).To(ContainSubstring("start timestamp \"start\" is invalid; cannot parse as a datetime")) }) It("should fail if invalid end timestamp", func() { vrpq := MakeVrpq() vrpq.Spec.FilterOptions.EndTimestamp = "end" - err := vrpq.ValidateCreate() + _, err := vrpq.ValidateCreate() Expect(err.Error()).To(ContainSubstring("end timestamp \"end\" is invalid; cannot parse as a datetime")) }) @@ -54,7 +58,7 @@ var _ = Describe("verticarestorepointsquery_webhook", func() { vrpq := MakeVrpq() vrpq.Spec.FilterOptions.StartTimestamp = "2006-01-02 23:59:59.123456789" vrpq.Spec.FilterOptions.EndTimestamp = "2006-01-02 23:59:59" - err := vrpq.ValidateCreate() + _, err := vrpq.ValidateCreate() Expect(err.Error()).To(ContainSubstring("start timestamp must be before end timestamp")) }) }) diff --git a/api/v1beta1/verticarestorepointsquery_types.go b/api/v1beta1/verticarestorepointsquery_types.go index 1057fdec7..3c9373767 100644 --- a/api/v1beta1/verticarestorepointsquery_types.go +++ b/api/v1beta1/verticarestorepointsquery_types.go @@ -109,7 +109,7 @@ type VerticaRestorePointsQuery struct { Status VerticaRestorePointsQueryStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // VerticaRestorePointsQueryList contains a list of VerticaRestorePointsQuery type VerticaRestorePointsQueryList struct { diff --git a/api/v1beta1/verticascrutinize_webhook.go b/api/v1beta1/verticascrutinize_webhook.go index 9e963a773..036c9fa86 100644 --- a/api/v1beta1/verticascrutinize_webhook.go +++ b/api/v1beta1/verticascrutinize_webhook.go @@ -29,6 +29,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // log is for logging in this package. 
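The webhook changes to every v1beta1 type in this patch follow one mechanical pattern: controller-runtime's webhook.Validator methods now return (admission.Warnings, error), so ValidateCreate, ValidateUpdate, and ValidateDelete all gain a warnings return value and their callers unpack two results. A minimal self-contained sketch of the new shape, using a hypothetical Widget type rather than any of the real CRs (the real types additionally satisfy runtime.Object and keep the `var _ webhook.Validator = &...{}` assertions shown in the hunks):

    package v1beta1 // sketch only; not part of the real package

    import (
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apimachinery/pkg/util/validation/field"
        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    // Widget is a hypothetical stand-in for VerticaDB, EventTrigger, VerticaAutoscaler, etc.
    type Widget struct{ Name string }

    // ValidateCreate now returns admission.Warnings alongside the error.
    func (w *Widget) ValidateCreate() (admission.Warnings, error) {
        var allErrs field.ErrorList // populated by the type's validateSpec() in the real code
        if len(allErrs) == 0 {
            return nil, nil
        }
        return nil, apierrors.NewInvalid(schema.GroupKind{Group: "vertica.com", Kind: "Widget"}, w.Name, allErrs)
    }

    // ValidateUpdate and ValidateDelete change shape the same way.
    func (w *Widget) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { return w.ValidateCreate() }
    func (w *Widget) ValidateDelete() (admission.Warnings, error)                 { return nil, nil }

This is also why every test assertion of the form Expect(x.ValidateCreate()).Should(Succeed()) becomes a two-step `_, err := x.ValidateCreate(); Expect(err).Should(Succeed())` throughout the test hunks above.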
@@ -50,32 +51,32 @@ func (vscr *VerticaScrutinize) Default() { var _ webhook.Validator = &VerticaScrutinize{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (vscr *VerticaScrutinize) ValidateCreate() error { +func (vscr *VerticaScrutinize) ValidateCreate() (admission.Warnings, error) { verticascrutinizelog.Info("validate create", "name", vscr.Name) allErrs := vscr.validateVscrSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(GkVSCR, vscr.Name, allErrs) + return nil, apierrors.NewInvalid(GkVSCR, vscr.Name, allErrs) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (vscr *VerticaScrutinize) ValidateUpdate(_ runtime.Object) error { +func (vscr *VerticaScrutinize) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { verticascrutinizelog.Info("validate update", "name", vscr.Name) allErrs := vscr.validateVscrSpec() if len(allErrs) == 0 { - return nil + return nil, nil } - return apierrors.NewInvalid(GkVSCR, vscr.Name, allErrs) + return nil, apierrors.NewInvalid(GkVSCR, vscr.Name, allErrs) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (vscr *VerticaScrutinize) ValidateDelete() error { +func (vscr *VerticaScrutinize) ValidateDelete() (admission.Warnings, error) { verticascrutinizelog.Info("validate delete", "name", vscr.Name) - return nil + return nil, nil } // validateVscrSpec will validate the current VerticaScrutinize to see if it is valid diff --git a/api/v1beta1/verticascrutinize_webhook_test.go b/api/v1beta1/verticascrutinize_webhook_test.go index ff849f606..7f5989d52 100644 --- a/api/v1beta1/verticascrutinize_webhook_test.go +++ b/api/v1beta1/verticascrutinize_webhook_test.go @@ -26,30 +26,36 @@ import ( var _ = Describe("verticascrutinize_webhook", func() { It("should succeed with no log age times", func() { vscr := MakeVscr() - Expect(vscr.ValidateCreate()).Should(Succeed()) - Expect(vscr.ValidateUpdate(vscr)).Should(Succeed()) + _, err := vscr.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vscr.ValidateUpdate(vscr) + Expect(err).Should(Succeed()) }) It("should succeed with log-age-hours only", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeHours] = "8" - Expect(vscr.ValidateCreate()).Should(Succeed()) - Expect(vscr.ValidateUpdate(vscr)).Should(Succeed()) + _, err := vscr.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vscr.ValidateUpdate(vscr) + Expect(err).Should(Succeed()) }) It("should succeed with valid log-age-oldest-time and log-age-newest-time", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeOldestTime] = GenerateLogAgeTime(-8, "-05") vscr.Annotations[vmeta.ScrutinizeLogAgeNewestTime] = GenerateLogAgeTime(24, "") - Expect(vscr.ValidateCreate()).Should(Succeed()) - Expect(vscr.ValidateUpdate(vscr)).Should(Succeed()) + _, err := vscr.ValidateCreate() + Expect(err).Should(Succeed()) + _, err = vscr.ValidateUpdate(vscr) + Expect(err).Should(Succeed()) }) // log-age-hours should be a positive integer It("failed to parse log-age-hours", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeHours] = "not-a-number" - err := vscr.ValidateCreate() + _, err := vscr.ValidateCreate() Expect(err.Error()).To(ContainSubstring("failed to parse log-age-hours")) }) @@ -58,14 +64,14 @@ var _ = Describe("verticascrutinize_webhook", func() { vscr.Annotations[vmeta.ScrutinizeLogAgeHours] = "8" 
vscr.Annotations[vmeta.ScrutinizeLogAgeOldestTime] = GenerateLogAgeTime(-8, "-05") vscr.Annotations[vmeta.ScrutinizeLogAgeNewestTime] = GenerateLogAgeTime(24, "") - err := vscr.ValidateCreate() + _, err := vscr.ValidateCreate() Expect(err.Error()).To(ContainSubstring("log-age-hours cannot be set alongside log-age-oldest-time and log-age-newest-time")) }) It("should fail if log-age-hours is negative", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeHours] = "-8" - err := vscr.ValidateCreate() + _, err := vscr.ValidateCreate() Expect(err.Error()).To(ContainSubstring("log-age-hours cannot be negative")) }) @@ -74,14 +80,14 @@ var _ = Describe("verticascrutinize_webhook", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeOldestTime] = "2024" vscr.Annotations[vmeta.ScrutinizeLogAgeNewestTime] = "not-time-type" - err := vscr.ValidateCreate() + _, err := vscr.ValidateCreate() Expect(err.Error()).To(ContainSubstring("failed to parse log-age-*-time")) }) It("should fail if log-age-oldest-time is after current time", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeOldestTime] = GenerateLogAgeTime(22, "+08") - err := vscr.ValidateCreate() + _, err := vscr.ValidateCreate() Expect(err.Error()).To(ContainSubstring("log-age-oldest-time cannot be set after current time")) }) @@ -89,7 +95,7 @@ var _ = Describe("verticascrutinize_webhook", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeOldestTime] = GenerateLogAgeTime(-4, "-05") vscr.Annotations[vmeta.ScrutinizeLogAgeNewestTime] = GenerateLogAgeTime(-24, "-05") - err := vscr.ValidateCreate() + _, err := vscr.ValidateCreate() Expect(err.Error()).To(ContainSubstring("log-age-oldest-time cannot be set after log-age-newest-time")) }) @@ -97,7 +103,7 @@ var _ = Describe("verticascrutinize_webhook", func() { It("should fail if log-age-oldest-time is in wrong format", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeOldestTime] = time.Now().AddDate(0, 0, -1).Format(time.RFC1123) - err := vscr.ValidateCreate() + _, err := vscr.ValidateCreate() Expect(err.Error()).To(ContainSubstring("should be formatted as: YYYY-MM-DD HH [+/-XX]")) }) @@ -105,7 +111,7 @@ var _ = Describe("verticascrutinize_webhook", func() { It("should fail if log-age-newest-time is in wrong format", func() { vscr := MakeVscr() vscr.Annotations[vmeta.ScrutinizeLogAgeNewestTime] = "invalid time format" - err := vscr.ValidateCreate() + _, err := vscr.ValidateCreate() Expect(err.Error()).To(ContainSubstring("should be formatted as: YYYY-MM-DD HH [+/-XX]")) }) diff --git a/api/v1beta1/webhook_suite_test.go b/api/v1beta1/webhook_suite_test.go index 096861fce..89b2c453e 100644 --- a/api/v1beta1/webhook_suite_test.go +++ b/api/v1beta1/webhook_suite_test.go @@ -29,13 +29,15 @@ import ( . "github.com/onsi/gomega" admissionv1 "k8s.io/api/admission/v1" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -77,7 +79,7 @@ var _ = BeforeSuite(func() { err = admissionv1.AddToScheme(scheme) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) Expect(err).NotTo(HaveOccurred()) @@ -85,20 +87,26 @@ var _ = BeforeSuite(func() { // start webhook server using Manager webhookInstallOptions := &testEnv.WebhookInstallOptions + webhookServer := webhook.NewServer(webhook.Options{ + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + }) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme, - Host: webhookInstallOptions.LocalServingHost, - Port: webhookInstallOptions.LocalServingPort, - CertDir: webhookInstallOptions.LocalServingCertDir, - LeaderElection: false, - MetricsBindAddress: "0", + Scheme: scheme, + LeaderElection: false, + WebhookServer: webhookServer, + Metrics: metricsServerOptions, }) Expect(err).NotTo(HaveOccurred()) err = (&VerticaAutoscaler{}).SetupWebhookWithManager(mgr) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:webhook + // +kubebuilder:scaffold:webhook go func() { err = mgr.Start(ctx) diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 8093bb81e..e4f11cff0 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -17,8 +17,10 @@ package main import ( "context" + "crypto/tls" "log" "os" + "strings" "time" // Allows us to pull in things generated from `go generate` @@ -36,7 +38,8 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -54,7 +57,10 @@ import ( vmeta "github.com/vertica/vertica-kubernetes/pkg/meta" "github.com/vertica/vertica-kubernetes/pkg/opcfg" "github.com/vertica/vertica-kubernetes/pkg/security" - //+kubebuilder:scaffold:imports + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + // +kubebuilder:scaffold:imports ) const ( @@ -81,7 +87,7 @@ func init() { utilruntime.Must(vapiB1.AddToScheme(scheme)) utilruntime.Must(vapiV1.AddToScheme(scheme)) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme } // addReconcilersToManager will add a controller for each CR that this operator @@ -167,21 +173,12 @@ func addReconcilersToManager(mgr manager.Manager, restCfg *rest.Config) { setupLog.Error(err, "unable to create controller", "controller", "VerticaReplicator") os.Exit(1) } - //+kubebuilder:scaffold:builder + // +kubebuilder:scaffold:builder } // addWebhooktsToManager will add any webhooks to the manager. If any failure // occurs, it will exit the program. func addWebhooksToManager(mgr manager.Manager) { - // Set the minimum TLS version for the webhook. By default it will use - // TLS 1.0, which has a lot of security flaws. This is a hacky way to - // set this and should be removed once there is a supported way. 
- // There are numerous proposals to allow this to be configured from - // Manager -- based on most recent activity this one looks promising: - // https://github.com/kubernetes-sigs/controller-runtime/issues/852 - webhookServer := mgr.GetWebhookServer() - webhookServer.TLSMinVersion = "1.3" - if err := (&vapiV1.VerticaDB{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "VerticaDB", "version", vapiV1.Version) } @@ -249,6 +246,7 @@ func getReadinessProbeCallback(mgr ctrl.Manager) healthz.Checker { return healthz.Ping } +//nolint:funlen func main() { logger := opcfg.GetLogger() if opcfg.GetLoggingFilePath() != "" { @@ -268,19 +266,66 @@ func main() { "broadcasterBurstSize", burstSize, ) + var webhookTLSOpts []func(*tls.Config) + var metricsTLSOpts []func(*tls.Config) + // Set the minimum TLS version for the webhook. By default it will use + // TLS 1.0, which has a lot of security flaws. This is a hacky way to + // set this and should be removed once there is a supported way. + // There are numerous proposals to allow this to be configured from + // Manager -- based on most recent activity this one looks promising: + // https://github.com/kubernetes-sigs/controller-runtime/issues/852 + webhookTLSOpts = append(webhookTLSOpts, func(c *tls.Config) { + c.MinVersion = tls.VersionTLS13 + }) + metricsTLSOpts = append(metricsTLSOpts, func(c *tls.Config) { + c.MinVersion = tls.VersionTLS13 + }) + + webhookServer := webhook.NewServer(webhook.Options{ + Port: 9443, + CertDir: CertDir, + TLSOpts: webhookTLSOpts, + }) + + secureMetrics := strings.HasSuffix(opcfg.GetMetricsAddr(), "8443") + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: opcfg.GetMetricsAddr(), + SecureServing: secureMetrics, + // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are + // not provided, self-signed certificates will be generated by default. This option is not recommended for + // production environments as self-signed certificates do not offer the same level of trust and security + // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing + // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName + // to provide certificates, ensuring the server communicates using trusted and secure certificates. + TLSOpts: metricsTLSOpts, + } + + if secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. 
More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + restCfg := ctrl.GetConfigOrDie() mgr, err := ctrl.NewManager(restCfg, ctrl.Options{ Scheme: scheme, - MetricsBindAddress: opcfg.GetMetricsAddr(), - Port: 9443, + Metrics: metricsServerOptions, + WebhookServer: webhookServer, HealthProbeBindAddress: ":8081", LeaderElection: true, LeaderElectionID: opcfg.GetLeaderElectionID(), - Namespace: opcfg.GetWatchNamespace(), - EventBroadcaster: multibroadcaster, - CertDir: CertDir, - Controller: v1alpha1.ControllerConfigurationSpec{ + Cache: cache.Options{DefaultNamespaces: map[string]cache.Config{ + opcfg.GetWatchNamespace(): {}, + }}, + EventBroadcaster: multibroadcaster, + Controller: config.Controller{ GroupKindConcurrency: map[string]int{ vapiB1.GkVDB.String(): opcfg.GetVerticaDBConcurrency(), vapiB1.GkVAS.String(): opcfg.GetVerticaAutoscalerConcurrency(), diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index a0ffb86d2..0a3538b30 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -14,22 +14,22 @@ namePrefix: verticadb-operator- # pairs: # someName: someValue +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +# [METRICS] Expose the controller manager metrics service. resources: - ../crd - ../rbac - ../manager - ../webhook - ../clusterpermissions -# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. - ../certmanager -# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. - ../prometheus +- metrics_service.yaml -patchesStrategicMerge: # Protect the /metrics endpoint by putting it behind auth. # If you want your operator to expose the /metrics # endpoint w/o any authn/z, please comment the following line. -- manager_auth_proxy_patch.yaml # Mount the controller config file for loading manager configurations # through a ComponentConfig type @@ -37,39 +37,48 @@ patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml -- manager_webhook_patch.yaml # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. # 'CERTMANAGER' needs to be enabled to use ca injection -- webhookcainjection_patch.yaml # the following config is for teaching kustomize how to do var substitution -vars: # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
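The manager options just above replace the removed Namespace field and the old v1alpha1.ControllerConfigurationSpec: namespace scoping now goes through the cache configuration, and per-kind concurrency through config.Controller. A minimal sketch of that shape, assuming a hypothetical managerOptions helper and caller-supplied watchNamespace and concurrency values:

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// managerOptions is a hypothetical helper sketching the two migrations in
// the hunk above: the watch namespace is expressed through the cache options
// (an empty "" key keeps the cluster-wide default), and per-GroupKind
// concurrency moves to config.Controller.
func managerOptions(scheme *runtime.Scheme, watchNamespace string,
	gk schema.GroupKind, concurrency int) manager.Options {
	return manager.Options{
		Scheme: scheme,
		Cache: cache.Options{
			DefaultNamespaces: map[string]cache.Config{watchNamespace: {}},
		},
		Controller: config.Controller{
			GroupKindConcurrency: map[string]int{gk.String(): concurrency},
		},
	}
}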
-- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +vars: +- fieldref: + fieldPath: metadata.namespace + name: CERTIFICATE_NAMESPACE objref: - kind: Certificate group: cert-manager.io + kind: Certificate + name: serving-cert version: v1 - name: serving-cert # this name should match the one in certificate.yaml - fieldref: - fieldpath: metadata.namespace -- name: CERTIFICATE_NAME +- fieldref: {} + name: CERTIFICATE_NAME objref: - kind: Certificate group: cert-manager.io + kind: Certificate + name: serving-cert version: v1 - name: serving-cert # this name should match the one in certificate.yaml -- name: SERVICE_NAMESPACE # namespace of the service +- fieldref: + fieldPath: metadata.namespace + name: SERVICE_NAMESPACE objref: kind: Service - version: v1 name: webhook-service - fieldref: - fieldpath: metadata.namespace -- name: SERVICE_NAME + version: v1 +- fieldref: {} + name: SERVICE_NAME objref: kind: Service - version: v1 name: webhook-service + version: v1 +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +patches: +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +- path: manager_webhook_patch.yaml +- path: webhookcainjection_patch.yaml diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml deleted file mode 100644 index 5c55570ac..000000000 --- a/config/default/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the -# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. -apiVersion: apps/v1 -kind: Deployment -metadata: - name: manager - namespace: system -spec: - template: - spec: - containers: - - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=0" - ports: - - containerPort: 8443 - protocol: TCP - name: metrics - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - # Note, if changing this memory request, be sure to update the - # pattern matcher in template-helm-chart.sh. We look for this to - # know where the end of the rbac proxy container spec is. 
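The kube-rbac-proxy sidecar being deleted here was what performed the authn/authz checks in front of /metrics; with the main.go change above, controller-runtime's own metrics server takes over that job when a secure port is requested. A condensed sketch of that decision, assuming a hypothetical metricsOptions helper and that the "ends in 8443" convention from main.go is the trigger for secure serving:

package main

import (
	"crypto/tls"
	"strings"

	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

// metricsOptions is a hypothetical helper mirroring the main.go change
// above: when the bind address ends in 8443, the endpoint is served over
// HTTPS with TLS 1.3 and every request must pass the authn/authz filter,
// the job the deleted kube-rbac-proxy sidecar used to do.
func metricsOptions(metricsAddr string) metricsserver.Options {
	opts := metricsserver.Options{
		BindAddress: metricsAddr,
		TLSOpts: []func(*tls.Config){
			func(c *tls.Config) { c.MinVersion = tls.VersionTLS13 },
		},
	}
	if strings.HasSuffix(metricsAddr, "8443") {
		opts.SecureServing = true
		opts.FilterProvider = filters.WithAuthenticationAndAuthorization
	}
	return opts
}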
- memory: 64Mi diff --git a/config/default/manager_metrics_patch.yaml b/config/default/manager_metrics_patch.yaml new file mode 100644 index 000000000..488f13693 --- /dev/null +++ b/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 \ No newline at end of file diff --git a/config/default/metrics_service.yaml b/config/default/metrics_service.yaml new file mode 100644 index 000000000..ca644dfe3 --- /dev/null +++ b/config/default/metrics_service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: verticadb-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager \ No newline at end of file diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index ac2c45756..393caa265 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -23,6 +23,9 @@ spec: containers: - command: - /manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 image: controller imagePullPolicy: IfNotPresent name: manager diff --git a/config/manager/operator-envs b/config/manager/operator-envs index a7fdb88f7..bb7f84cfd 100644 --- a/config/manager/operator-envs +++ b/config/manager/operator-envs @@ -6,6 +6,8 @@ WEBHOOKS_ENABLED CONTROLLERS_ENABLED CONTROLLERS_SCOPE METRICS_ADDR +METRICS_TLS +METRICS_PROXY_RBAC LOG_LEVEL CONCURRENCY_VERTICADB CONCURRENCY_VERTICAAUTOSCALER diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml index b2cb6d3d7..9be2d80c7 100644 --- a/config/prometheus/monitor.yaml +++ b/config/prometheus/monitor.yaml @@ -14,6 +14,15 @@ spec: scheme: https bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification. This poses a significant security risk by making the system vulnerable to + # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between + # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data, + # compromising the integrity and confidentiality of the information. 
+ # Please use the following options for secure configurations: + # caFile: /etc/metrics-certs/ca.crt + # certFile: /etc/metrics-certs/tls.crt + # keyFile: /etc/metrics-certs/tls.key insecureSkipVerify: true selector: matchLabels: diff --git a/config/rbac/auth_proxy_client_clusterrolebinding.yaml b/config/rbac/auth_proxy_client_clusterrolebinding.yaml deleted file mode 100644 index ae2fa8f66..000000000 --- a/config/rbac/auth_proxy_client_clusterrolebinding.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metrics-reader -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: metrics-reader -subjects: -- kind: ServiceAccount - name: manager - namespace: system -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 618f5e417..000000000 --- a/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: ["create"] -- apiGroups: ["authorization.k8s.io"] - resources: - - subjectaccessreviews - verbs: ["create"] diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml deleted file mode 100644 index 5f70c2d54..000000000 --- a/config/rbac/auth_proxy_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: manager - namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml deleted file mode 100644 index 7ab405a22..000000000 --- a/config/rbac/auth_proxy_service.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: verticadb-operator - vertica.com/svc-type: operator-metrics - name: metrics-service - namespace: system -spec: - ports: - - name: metrics - port: 8443 - protocol: TCP - targetPort: metrics - selector: - control-plane: verticadb-operator diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index ae4828118..69bd5461c 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -9,14 +9,15 @@ resources: - role_binding.yaml - leader_election_role.yaml - leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -- auth_proxy_service.yaml -- auth_proxy_role.yaml -- auth_proxy_role_binding.yaml -- auth_proxy_client_clusterrole.yaml -- auth_proxy_client_clusterrolebinding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. +# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml # The next setup the RBAC rules for the webhook. 
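The metrics-auth RBAC listed above exists because WithAuthenticationAndAuthorization makes the operator's own ServiceAccount validate each scraper: it creates a TokenReview to authenticate the caller and a SubjectAccessReview to check that it may GET /metrics, which is what the two create verbs in metrics_auth_role.yaml below allow. A rough sketch of those API calls, assuming a hypothetical checkScraper helper and an already-built clientset:

package main

import (
	"context"

	authenticationv1 "k8s.io/api/authentication/v1"
	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// checkScraper is a hypothetical illustration of the two API calls the
// metrics filter issues per scrape: authenticate the bearer token, then
// authorize a GET on the /metrics non-resource URL.
func checkScraper(ctx context.Context, cs kubernetes.Interface, token string) (bool, error) {
	tr, err := cs.AuthenticationV1().TokenReviews().Create(ctx,
		&authenticationv1.TokenReview{
			Spec: authenticationv1.TokenReviewSpec{Token: token},
		}, metav1.CreateOptions{})
	if err != nil || !tr.Status.Authenticated {
		return false, err
	}
	sar, err := cs.AuthorizationV1().SubjectAccessReviews().Create(ctx,
		&authorizationv1.SubjectAccessReview{
			Spec: authorizationv1.SubjectAccessReviewSpec{
				User:   tr.Status.User.Username,
				Groups: tr.Status.User.Groups,
				NonResourceAttributes: &authorizationv1.NonResourceAttributes{
					Path: "/metrics",
					Verb: "get",
				},
			},
		}, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return sar.Status.Allowed, nil
}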
- webhook_config_clusterrole.yaml - webhook_config_clusterrolebinding.yaml diff --git a/config/rbac/metrics_auth_role.yaml b/config/rbac/metrics_auth_role.yaml new file mode 100644 index 000000000..2ba875039 --- /dev/null +++ b/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create \ No newline at end of file diff --git a/config/rbac/metrics_auth_role_binding.yaml b/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 000000000..c7063d7b3 --- /dev/null +++ b/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: + - kind: ServiceAccount + name: controller-manager + namespace: system \ No newline at end of file diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/metrics_reader_role.yaml similarity index 66% rename from config/rbac/auth_proxy_client_clusterrole.yaml rename to config/rbac/metrics_reader_role.yaml index bd4af137a..50fbca545 100644 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ b/config/rbac/metrics_reader_role.yaml @@ -3,5 +3,7 @@ kind: ClusterRole metadata: name: metrics-reader rules: -- nonResourceURLs: ["/metrics"] - verbs: ["get"] +- nonResourceURLs: + - "/metrics" + verbs: + - get \ No newline at end of file diff --git a/config/rbac/vertica-server-role.yaml b/config/rbac/vertica-server-role.yaml index e82fa152c..be3d81f0a 100644 --- a/config/rbac/vertica-server-role.yaml +++ b/config/rbac/vertica-server-role.yaml @@ -27,3 +27,4 @@ rules: verbs: - get - list + - watch diff --git a/go.mod b/go.mod index 45fa37d15..47ca77698 100644 --- a/go.mod +++ b/go.mod @@ -8,73 +8,93 @@ require ( github.com/aws/aws-sdk-go v1.49.5 github.com/bigkevmcd/go-configparser v0.0.0-20210106142102-909504547ead github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v1.2.4 - github.com/go-logr/zapr v1.2.4 + github.com/go-logr/logr v1.4.1 + github.com/go-logr/zapr v1.3.0 github.com/google/uuid v1.4.0 github.com/lithammer/dedent v1.1.0 - github.com/onsi/ginkgo/v2 v2.7.0 - github.com/onsi/gomega v1.24.2 + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.16.0 github.com/vertica/vcluster v1.0.0 github.com/vertica/vertica-sql-go v1.1.1 - go.uber.org/zap v1.25.0 + go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/text v0.21.0 - gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.26.2 - k8s.io/apiextensions-apiserver v0.26.2 - k8s.io/apimachinery v0.26.2 - k8s.io/client-go v0.26.2 - sigs.k8s.io/controller-runtime v0.14.5 + k8s.io/api v0.30.1 + k8s.io/apiextensions-apiserver v0.30.1 + k8s.io/apimachinery v0.30.1 + k8s.io/client-go v0.30.1 + sigs.k8s.io/controller-runtime v0.18.4 ) require ( cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.5 // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df 
// indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/deckarep/golang-set/v2 v2.3.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.14.1 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.17.8 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect github.com/theckman/yacspin v0.13.12 // indirect go.opencensus.io v0.24.0 // indirect - go.uber.org/multierr v1.10.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // 
indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect @@ -82,7 +102,8 @@ require ( golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/time v0.5.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/api v0.153.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect @@ -92,12 +113,14 @@ require ( google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.26.2 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + k8s.io/apiserver v0.30.1 // indirect + k8s.io/component-base v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index c7fb97d16..32606d69f 100644 --- a/go.sum +++ b/go.sum @@ -1,72 +1,32 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= -cloud.google.com/go/bigquery 
v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/secretmanager v1.11.4 h1:krnX9qpG2kR2fJ+u+uNyNo+ACVhplIAS4Pu7u+4gd+k= cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df 
h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.49.5 h1:y2yfBlwjPDi3/sBVKeznYEdDy6wIhjA2L5NCBMLUIYA= github.com/aws/aws-sdk-go v1.49.5/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bigkevmcd/go-configparser v0.0.0-20210106142102-909504547ead h1:UhYWAphNveMty305skySR5ST/hbYDexgsgkhcy0MDhM= github.com/bigkevmcd/go-configparser v0.0.0-20210106142102-909504547ead/go.mod h1:RI5D4DqbDX0Kb0SvKTuAKMYlkSBND3zLQZI/wiS5Ij0= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -81,114 +41,78 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A= github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag 
v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp 
v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -196,50 +120,38 @@ github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 
v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson 
v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -247,80 +159,51 @@ github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPn github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= -github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= -github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= -github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= -github.com/onsi/gomega v1.24.2/go.mod 
h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -332,294 +215,110 @@ github.com/tonglil/buflogr v1.0.1 h1:WXFZLKxLfqcVSmckwiMCF8jJwjIgmStJmg63YKRF1p0 github.com/tonglil/buflogr v1.0.1/go.mod h1:yYWwvSpn/3uAaqjf6mJg/XMiAciaR0QcRJH2gJGDxNE= github.com/vertica/vertica-sql-go v1.1.1 h1:sZYijzBbvdAbJcl4cYlKjR+Eh/X1hGKzukWuhh8PjvI= 
github.com/vertica/vertica-sql-go v1.1.1/go.mod h1:fGr44VWdEvL+f+Qt5LkKLOT7GoxaWdoUCnPBU9h6t04= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= 
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod 
h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= @@ -627,17 +326,9 @@ google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= @@ -649,66 +340,52 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= -k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= -k8s.io/apiextensions-apiserver v0.26.2 h1:/yTG2B9jGY2Q70iGskMf41qTLhL9XeNN2KhI0uDgwko= -k8s.io/apiextensions-apiserver v0.26.2/go.mod h1:Y7UPgch8nph8mGCuVk0SK83LnS8Esf3n6fUBgew8SH8= -k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= -k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= -k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= -k8s.io/client-go v0.26.2/go.mod 
h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= -k8s.io/component-base v0.26.2 h1:IfWgCGUDzrD6wLLgXEstJKYZKAFS2kO+rBRi0p3LqcI= -k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s= -sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= +k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= 
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/helm-charts/verticadb-operator/README.md b/helm-charts/verticadb-operator/README.md index 1eb0d6b80..66a7fe9a5 100644 --- a/helm-charts/verticadb-operator/README.md +++ b/helm-charts/verticadb-operator/README.md @@ -22,8 +22,8 @@ This helm chart will install the operator and an admission controller webhook. | priorityClassName | The [priority class name](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass) that is assigned to the operator pod. This affects where the pod gets scheduled. | Not set | | prometheus.createProxyRBAC | Set this to false if you want to avoid creating the rbac rules for accessing the metrics endpoint when it is protected by the rbac auth proxy. By default, we will create those RBAC rules. | true | | prometheus.createServiceMonitor | Set this to true if you want to create a ServiceMonitor. This object is a CR provided by the prometheus operator to allow for easy service discovery. If set to true, the prometheus operator must be installed before installing this chart.
See: https://github.com/prometheus-operator/prometheus-operator

*This parameter is deprecated and will be removed in a future release.* | false | -| prometheus.expose | Controls exposing of the prometheus metrics endpoint. Valid options are:

- **EnableWithAuthProxy**: A new service object will be created that exposes the metrics endpoint. Access to the metrics are controlled by rbac rules using the proxy (see https://github.com/brancz/kube-rbac-proxy). The metrics endpoint will use the https scheme.

- **EnableWithoutAuth**: Like EnableWithAuthProxy, this will create a service object to expose the metrics endpoint. However, there is no authority checking when using the endpoint. Anyone who has network access to the endpoint (i.e. any pod in k8s) will be able to read the metrics. The metrics endpoint will use the http scheme.

- **Disable**: Prometheus metrics are not exposed at all. | Disable | -| prometheus.tlsSecret | Use this if you want to provide your own certs for the prometheus metrics endpoint. It refers to a secret in the same namespace that the helm chart is deployed in. The secret must have the following keys set:

- **tls.key** – private key
- **tls.crt** – cert for the private key
- **ca.crt** – CA certificate

The prometheus.expose=EnableWithAuthProxy must be set for the operator to use the certs provided. If this field is omitted, the RBAC proxy sidecar will generate its own self-signed cert. | "" | +| prometheus.expose | Controls exposing of the prometheus metrics endpoint. Valid options are:

- **EnableWithAuthProxy**: A new service object will be created that exposes the metrics endpoint. Access to the metrics is controlled by rbac rules. The metrics endpoint will use the https scheme.

- **EnableWithoutAuth**: Like EnableWithAuthProxy, this will create a service object to expose the metrics endpoint. However, there is no authority checking when using the endpoint. Anyone who has network access to the endpoint (i.e. any pod in k8s) will be able to read the metrics. The metrics endpoint will use the http scheme.

- **Disable**: Prometheus metrics are not exposed at all. | Disable | +| prometheus.tlsSecret | Use this if you want to provide your own certs for the prometheus metrics endpoint. It refers to a secret in the same namespace that the helm chart is deployed in. The secret must have the following keys set:

- **tls.key** – private key
- **tls.crt** – cert for the private key
- **ca.crt** – CA certificate

The prometheus.expose=EnableWithAuthProxy must be set for the operator to use the certs provided. If this field is omitted, the operator will generate its own self-signed cert. | "" | | reconcileConcurrency.eventtrigger | Set this to control the concurrency of reconciliations of EventTrigger CRs | 1 | | reconcileConcurrency.sandboxconfigmap | Set this to control the concurrency of reconciliations of ConfigMaps that contain state for a sandbox | 1 | | reconcileConcurrency.verticaautoscaler | Set this to control the concurrency of reconciliations of VerticaAutoscaler CRs | 1 | @@ -31,8 +31,6 @@ This helm chart will install the operator and an admission controller webhook. | reconcileConcurrency.verticarestorepointsquery | Set this to control the concurrency of reconciliations of VerticaRestorePointsQuery CRs | 1 | | reconcileConcurrency.verticascrutinize | Set this to control the concurrency of reconciliations of VerticaScrutinize CRs | 1 | | reconcileConcurrency.verticareplicator | Set this to control the concurrency of reconciliations of VerticaReplicator CRs | 3 | -| rbac_proxy_image.name | Image name of Kubernetes RBAC proxy. | kubebuilder/kube-rbac-proxy:v0.13.1 | -| rbac_proxy_image.repo | Repo server hosting rbac_proxy_image.name | gcr.io | | resources.\* | The resource requirements for the operator pod. |
limits:
cpu: 100m
memory: 750Mi
requests:
cpu: 100m
memory: 20Mi
| | serviceAccountAnnotations | A map of annotations that will be added to the serviceaccount created. | | | serviceAccountNameOverride | Controls the name given to the serviceaccount that is created. | | diff --git a/helm-charts/verticadb-operator/tests/auth-proxy-roles_test.yaml b/helm-charts/verticadb-operator/tests/auth-proxy-roles_test.yaml deleted file mode 100644 index 5388c9422..000000000 --- a/helm-charts/verticadb-operator/tests/auth-proxy-roles_test.yaml +++ /dev/null @@ -1,25 +0,0 @@ -suite: Auth proxy roles -templates: - - verticadb-operator-metrics-reader-cr.yaml - - verticadb-operator-metrics-reader-crb.yaml - - verticadb-operator-proxy-role-cr.yaml - - verticadb-operator-proxy-rolebinding-crb.yaml -tests: - - it: should not create roles/rolebinding by default - asserts: - - hasDocuments: - count: 0 - - it: should create roles/rolebinding if exposing with auth - set: - prometheus: - expose: EnableWithAuthProxy - asserts: - - hasDocuments: - count: 1 - - it: should not create roles/rolebinding if exposing without auth - set: - prometheus: - expose: EnableWithoutAuth - asserts: - - hasDocuments: - count: 0 diff --git a/helm-charts/verticadb-operator/tests/image-name-and-tag_test.yaml b/helm-charts/verticadb-operator/tests/image-name-and-tag_test.yaml index f76ee513d..82820f637 100644 --- a/helm-charts/verticadb-operator/tests/image-name-and-tag_test.yaml +++ b/helm-charts/verticadb-operator/tests/image-name-and-tag_test.yaml @@ -18,9 +18,6 @@ tests: image: repo: first-private-repo:5000 name: vertica/verticadb-operator:latest - rbac_proxy_image: - repo: second-private-repo:5000 - name: my-rbac-proxy:v1 imagePullSecrets: - name: image-pull-secrets prometheus: @@ -29,9 +26,6 @@ tests: - equal: path: spec.template.spec.containers[0].image value: first-private-repo:5000/vertica/verticadb-operator:latest - - equal: - path: spec.template.spec.containers[1].image - value: second-private-repo:5000/my-rbac-proxy:v1 - equal: path: spec.template.spec.imagePullSecrets[0].name value: image-pull-secrets diff --git a/helm-charts/verticadb-operator/tests/metric-cert_test.yaml b/helm-charts/verticadb-operator/tests/metric-cert_test.yaml deleted file mode 100644 index 7ece2d630..000000000 --- a/helm-charts/verticadb-operator/tests/metric-cert_test.yaml +++ /dev/null @@ -1,44 +0,0 @@ -suite: Metrics certificate tests -templates: - - verticadb-operator-manager-deployment.yaml -tests: - - it: should include the cert if prometheus.tlsSecret is set - set: - prometheus: - expose: EnableWithAuthProxy - tlsSecret: my-secret - asserts: - - equal: - path: spec.template.spec.containers[1].volumeMounts[0] - value: - name: auth-cert - mountPath: /cert - - equal: - path: spec.template.spec.volumes[0] - value: - name: auth-cert - secret: - secretName: my-secret - - equal: - path: spec.template.spec.containers[1].args - value: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - - --tls-cert-file=/cert/tls.crt - - --tls-private-key-file=/cert/tls.key - - --client-ca-file=/cert/ca.crt - - it: should not include the cert if prometheus.tlsSecret is not set - set: - prometheus: - expose: EnableWithAuthProxy - tlsSecret: "" - asserts: - - equal: - path: spec.template.spec.containers[1].args - value: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 diff --git a/helm-charts/verticadb-operator/tests/metrics-deployment_test.yaml b/helm-charts/verticadb-operator/tests/metrics-deployment_test.yaml 
index e6c6be84e..961249156 100644 --- a/helm-charts/verticadb-operator/tests/metrics-deployment_test.yaml +++ b/helm-charts/verticadb-operator/tests/metrics-deployment_test.yaml @@ -20,11 +20,3 @@ tests: path: spec.template.spec.containers content: name: kube-rbac-proxy - - it: should include proxy sidecar if expose is with auth - set: - prometheus: - expose: EnableWithAuthProxy - asserts: - - equal: - path: spec.template.spec.containers[1].name - value: kube-rbac-proxy diff --git a/helm-charts/verticadb-operator/tests/metrics-service_test.yaml b/helm-charts/verticadb-operator/tests/metrics-service_test.yaml deleted file mode 100644 index fe5cee147..000000000 --- a/helm-charts/verticadb-operator/tests/metrics-service_test.yaml +++ /dev/null @@ -1,32 +0,0 @@ -suite: Metrics service tests -templates: - - verticadb-operator-metrics-service-svc.yaml -tests: - - it: should not include any object if expose is disabled - set: - prometheus: - expose: Disable - asserts: - - hasDocuments: - count: 0 - - it: should include object if EnableWithProxy is set - set: - prometheus: - expose: EnableWithAuthProxy - asserts: - - hasDocuments: - count: 1 - - isKind: - of: Service - - it: should include object if EnableWithoutAuth is set - set: - prometheus: - expose: EnableWithoutAuth - asserts: - - hasDocuments: - count: 1 - - isKind: - of: Service - - - diff --git a/helm-charts/verticadb-operator/tests/serviceaccount-rolebinding_test.yaml b/helm-charts/verticadb-operator/tests/serviceaccount-rolebinding_test.yaml index c170de487..16b7d0a6f 100644 --- a/helm-charts/verticadb-operator/tests/serviceaccount-rolebinding_test.yaml +++ b/helm-charts/verticadb-operator/tests/serviceaccount-rolebinding_test.yaml @@ -2,8 +2,6 @@ suite: ServiceAccount tests templates: - verticadb-operator-manager-clusterrolebinding-crb.yaml - verticadb-operator-webhook-config-crb.yaml - - verticadb-operator-proxy-rolebinding-crb.yaml - - verticadb-operator-metrics-reader-crb.yaml - verticadb-operator-leader-election-rolebinding-rb.yaml tests: - it: should include the serviceaccount name when an override is set diff --git a/helm-charts/verticadb-operator/values.yaml b/helm-charts/verticadb-operator/values.yaml index b1a2ced91..fe38b8523 100644 --- a/helm-charts/verticadb-operator/values.yaml +++ b/helm-charts/verticadb-operator/values.yaml @@ -29,10 +29,6 @@ image: name: opentext/verticadb-operator:25.1.0-0 pullPolicy: IfNotPresent -rbac_proxy_image: - repo: gcr.io - name: kubebuilder/kube-rbac-proxy:v0.13.1 - imagePullSecrets: null controllers: @@ -156,9 +152,8 @@ prometheus: # Controls exposing of the prometheus metrics endpoint. Valid options are: # # EnableWithAuthProxy: A new service object will be created that exposes the - # metrics endpoint. Access to the metrics are controlled by rbac rules - # using the proxy (see https://github.com/brancz/kube-rbac-proxy). The - # metrics endpoint will use the https scheme. + # metrics endpoint. Access to the metrics are controlled by rbac rules. + # The metrics endpoint will use the https scheme. # EnableWithoutAuth: Like EnableWithAuthProxy, this will create a service # object to expose the metrics endpoint. However, there is no authority # checking when using the endpoint. 
Anyone who had network access diff --git a/pkg/builder/builder.go b/pkg/builder/builder.go index b465d3593..3f6a0ed1e 100644 --- a/pkg/builder/builder.go +++ b/pkg/builder/builder.go @@ -1498,7 +1498,7 @@ func BuildStsSpec(nm types.NamespacedName, vdb *vapi.VerticaDB, sc *vapi.Subclus Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, StorageClassName: getStorageClassName(vdb), - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: vdb.Spec.Local.RequestSize, }, @@ -1596,7 +1596,7 @@ func BuildPVC(vdb *vapi.VerticaDB, sc *vapi.Subcluster, podIndex int32) *corev1. AccessModes: []corev1.PersistentVolumeAccessMode{ "ReadWriteOnce", }, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: vdb.Spec.Local.RequestSize, }, diff --git a/pkg/controllers/et/eventtrigger_controller.go b/pkg/controllers/et/eventtrigger_controller.go index 0939ad9d7..f0e263d66 100644 --- a/pkg/controllers/et/eventtrigger_controller.go +++ b/pkg/controllers/et/eventtrigger_controller.go @@ -32,7 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/go-logr/logr" v1vapi "github.com/vertica/vertica-kubernetes/api/v1" @@ -53,10 +52,10 @@ const ( vdbNameField = ".spec.references.object.name" ) -//+kubebuilder:rbac:groups=vertica.com,resources=eventtriggers,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=vertica.com,resources=eventtriggers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=vertica.com,resources=eventtriggers/finalizers,verbs=update -//+kubebuilder:rbac:groups="batch",resources=jobs,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=vertica.com,resources=eventtriggers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=vertica.com,resources=eventtriggers/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=vertica.com,resources=eventtriggers/finalizers,verbs=update +// +kubebuilder:rbac:groups="batch",resources=jobs,verbs=get;list;watch;create // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -112,7 +111,7 @@ func (r *EventTriggerReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&vapi.EventTrigger{}). Owns(&batchv1.Job{}). Watches( - &source.Kind{Type: &v1vapi.VerticaDB{}}, + &v1vapi.VerticaDB{}, handler.EnqueueRequestsFromMapFunc(r.findObjectsForVerticaDB), builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), ). @@ -139,7 +138,7 @@ func (r *EventTriggerReconciler) setupFieldIndexer(indx client.FieldIndexer) err // findObjectsForVerticaDB will generate requests to reconcile EventTriggers // based on watched VerticaDB. 
-func (r *EventTriggerReconciler) findObjectsForVerticaDB(vdb client.Object) []reconcile.Request { +func (r *EventTriggerReconciler) findObjectsForVerticaDB(_ context.Context, vdb client.Object) []reconcile.Request { attachedTriggers := &vapi.EventTriggerList{} listOps := &client.ListOptions{ FieldSelector: fields.OneTermEqualSelector(vdbNameField, vdb.GetName()), diff --git a/pkg/controllers/et/suite_test.go b/pkg/controllers/et/suite_test.go index 5b66ac56c..edfa2336b 100644 --- a/pkg/controllers/et/suite_test.go +++ b/pkg/controllers/et/suite_test.go @@ -33,7 +33,7 @@ import ( v1vapi "github.com/vertica/vertica-kubernetes/api/v1" vapi "github.com/vertica/vertica-kubernetes/api/v1beta1" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to @@ -73,7 +73,7 @@ var _ = BeforeSuite(func() { err = v1vapi.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme // Create a client that doesn't have a cache. k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) diff --git a/pkg/controllers/sandbox/sandbox_controller.go b/pkg/controllers/sandbox/sandbox_controller.go index 22ff09066..8ba2779cc 100644 --- a/pkg/controllers/sandbox/sandbox_controller.go +++ b/pkg/controllers/sandbox/sandbox_controller.go @@ -48,7 +48,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" ) // SandboxConfigMapReconciler reconciles a ConfigMap for sandboxing @@ -71,7 +70,7 @@ func (r *SandboxConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error { builder.WithPredicates(r.predicateFuncs(), predicate.ResourceVersionChangedPredicate{}), ). Watches( - &source.Kind{Type: &appsv1.StatefulSet{}}, + &appsv1.StatefulSet{}, handler.EnqueueRequestsFromMapFunc(r.findObjectsForStatesulSet), builder.WithPredicates(r.predicateFuncs(), predicate.ResourceVersionChangedPredicate{}), ). 
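The watch wiring in the hunks above follows the controller-runtime v0.15+ builder API: `Watches` now takes the object itself instead of `&source.Kind{...}`, and map functions gain a leading `context.Context` parameter. Below is a minimal sketch of that pattern, assuming controller-runtime v0.15 or later; `ExampleReconciler`, the ConfigMap/StatefulSet pairing, and `findObjectsForStatefulSet` are illustrative placeholders, not this operator's actual types.

```go
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// ExampleReconciler is a hypothetical reconciler used only to show the
// post-v0.15 Watches/MapFunc signatures.
type ExampleReconciler struct {
	client.Client
}

func (r *ExampleReconciler) Reconcile(_ context.Context, _ ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil
}

// findObjectsForStatefulSet maps a watched StatefulSet to reconcile requests.
// Note the context.Context parameter required by newer controller-runtime.
func (r *ExampleReconciler) findObjectsForStatefulSet(_ context.Context, sts client.Object) []reconcile.Request {
	return []reconcile.Request{
		{NamespacedName: client.ObjectKeyFromObject(sts)},
	}
}

func (r *ExampleReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}).
		Watches(
			&appsv1.StatefulSet{}, // the object directly, no source.Kind wrapper
			handler.EnqueueRequestsFromMapFunc(r.findObjectsForStatefulSet),
			builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
		).
		Complete(r)
}
```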
@@ -261,7 +260,7 @@ func (r *SandboxConfigMapReconciler) GetConfig() *rest.Config { // findObjectsForStatesulSet will generate requests to reconcile sandbox ConfigMaps // based on watched Statefulset -func (r *SandboxConfigMapReconciler) findObjectsForStatesulSet(sts client.Object) []reconcile.Request { +func (r *SandboxConfigMapReconciler) findObjectsForStatesulSet(_ context.Context, sts client.Object) []reconcile.Request { configMaps := corev1.ConfigMapList{} stsLabels := sts.GetLabels() sbLabels := make(map[string]string, len(vmeta.SandboxConfigMapLabels)) diff --git a/pkg/controllers/sandbox/suite_test.go b/pkg/controllers/sandbox/suite_test.go index c1261ca29..991301ebb 100644 --- a/pkg/controllers/sandbox/suite_test.go +++ b/pkg/controllers/sandbox/suite_test.go @@ -34,6 +34,7 @@ import ( v1 "github.com/vertica/vertica-kubernetes/api/v1" "github.com/vertica/vertica-kubernetes/api/v1beta1" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) var sbRec *SandboxConfigMapReconciler @@ -74,9 +75,12 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) sbRec = &SandboxConfigMapReconciler{ diff --git a/pkg/controllers/vas/suite_test.go b/pkg/controllers/vas/suite_test.go index d1bb28110..09e7641e5 100644 --- a/pkg/controllers/vas/suite_test.go +++ b/pkg/controllers/vas/suite_test.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) var k8sClient client.Client @@ -63,9 +64,12 @@ var _ = BeforeSuite(func() { k8sClient, err = client.New(restCfg, client.Options{Scheme: scheme.Scheme}) ExpectWithOffset(1, err).NotTo(HaveOccurred()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/controllers/vas/verticaautoscaler_controller.go b/pkg/controllers/vas/verticaautoscaler_controller.go index 2cd4b30dc..109cf9d75 100644 --- a/pkg/controllers/vas/verticaautoscaler_controller.go +++ b/pkg/controllers/vas/verticaautoscaler_controller.go @@ -44,11 +44,11 @@ type VerticaAutoscalerReconciler struct { EVRec record.EventRecorder } -//+kubebuilder:rbac:groups=vertica.com,resources=verticaautoscalers,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=vertica.com,resources=verticaautoscalers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=vertica.com,resources=verticaautoscalers/finalizers,verbs=update -//+kubebuilder:rbac:groups=vertica.com,resources=verticadbs,verbs=get;list;create;update;patch;delete -//+kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;delete;patch +// +kubebuilder:rbac:groups=vertica.com,resources=verticaautoscalers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=vertica.com,resources=verticaautoscalers/status,verbs=get;update;patch +// 
+kubebuilder:rbac:groups=vertica.com,resources=verticaautoscalers/finalizers,verbs=update +// +kubebuilder:rbac:groups=vertica.com,resources=verticadbs,verbs=get;list;create;update;patch;delete +// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;delete;patch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. diff --git a/pkg/controllers/vdb/imageversion_reconciler.go b/pkg/controllers/vdb/imageversion_reconciler.go index 835ffb18f..2cf35531a 100644 --- a/pkg/controllers/vdb/imageversion_reconciler.go +++ b/pkg/controllers/vdb/imageversion_reconciler.go @@ -130,7 +130,7 @@ func (v *ImageVersionReconciler) isValidSandboxUpgradePath(ctx context.Context, // makeSandboxVersionInfo will build and return the sandbox version info based // on the configmap annotations func (v *ImageVersionReconciler) makeSandboxVersionInfo(ctx context.Context) (*version.Info, bool, error) { - sbMan := MakeSandboxConfigMapManager(v.Rec, v.Vdb, v.PFacts.SandboxName, "" /*no uuid*/) + sbMan := MakeSandboxConfigMapManager(v.Rec, v.Vdb, v.PFacts.SandboxName, "" /* no uuid */) oldVersion, found, err := sbMan.getSandboxVersion(ctx) // If the version annotation isn't present, we abort creation of Info if !found || err != nil { diff --git a/pkg/controllers/vdb/obj_reconciler_test.go b/pkg/controllers/vdb/obj_reconciler_test.go index f29f40326..5d617a5e5 100644 --- a/pkg/controllers/vdb/obj_reconciler_test.go +++ b/pkg/controllers/vdb/obj_reconciler_test.go @@ -38,7 +38,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" - //+kubebuilder:scaffold:imports + // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to diff --git a/pkg/controllers/vdb/onlineupgrade_reconciler.go b/pkg/controllers/vdb/onlineupgrade_reconciler.go index 817291e64..32b57178f 100644 --- a/pkg/controllers/vdb/onlineupgrade_reconciler.go +++ b/pkg/controllers/vdb/onlineupgrade_reconciler.go @@ -407,7 +407,7 @@ func (r *OnlineUpgradeReconciler) runRebalanceSandboxSubcluster(ctx context.Cont } pf := r.PFacts[vapi.MainCluster] - actor := MakeRebalanceShardsReconciler(r.VRec, r.Log, r.VDB, pf.PRunner, pf, "" /*all subclusters*/) + actor := MakeRebalanceShardsReconciler(r.VRec, r.Log, r.VDB, pf.PRunner, pf, "" /* all subclusters */) r.Manager.traceActorReconcile(actor) res, err := actor.Reconcile(ctx, &ctrl.Request{}) r.PFacts[vapi.MainCluster].Invalidate() @@ -463,7 +463,7 @@ func (r *OnlineUpgradeReconciler) queryOriginalConfigParamDisableNonReplicatable return ctrl.Result{}, err } pf := r.PFacts[vapi.MainCluster] - initiator, ok := pf.FindFirstUpPod(false /*not allow read-only*/, "" /*arbitrary subcluster*/) + initiator, ok := pf.FindFirstUpPod(false /* not allow read-only */, "" /* arbitrary subcluster */) if !ok { r.Log.Info("No Up nodes found. 
Requeue reconciliation.") return ctrl.Result{Requeue: true}, nil @@ -526,7 +526,7 @@ func (r *OnlineUpgradeReconciler) clearConfigParamDisableNonReplicatableQueries( func (r *OnlineUpgradeReconciler) setConfigParamDisableNonReplicatableQueriesImpl(ctx context.Context, value, clusterName string) (ctrl.Result, error) { pf := r.PFacts[clusterName] - initiator, ok := pf.FindFirstUpPod(false /*not allow read-only*/, "" /*arbitrary subcluster*/) + initiator, ok := pf.FindFirstUpPod(false /* not allow read-only */, "" /* arbitrary subcluster */) if !ok { r.Log.Info("No Up nodes found. Requeue reconciliation.") return ctrl.Result{Requeue: true}, nil @@ -721,7 +721,7 @@ func (r *OnlineUpgradeReconciler) pauseConnectionsAtReplicaGroupA(ctx context.Co } pf := r.PFacts[vapi.MainCluster] - initiator, ok := pf.FindFirstUpPod(false /*not allow read-only*/, "" /*arbitrary subcluster*/) + initiator, ok := pf.FindFirstUpPod(false /* not allow read-only */, "" /* arbitrary subcluster */) if !ok { r.Log.Info("No Up nodes found. Requeue reconciliation.") return ctrl.Result{Requeue: true}, nil @@ -741,7 +741,7 @@ func (r *OnlineUpgradeReconciler) waitForConnectionsPaused(ctx context.Context) } pfacts := r.PFacts[vapi.MainCluster] - _, ok := pfacts.FindFirstUpPod(false /*not allow read-only*/, "" /*arbitrary subcluster*/) + _, ok := pfacts.FindFirstUpPod(false /* not allow read-only */, "" /* arbitrary subcluster */) if !ok { r.Log.Info("No Up nodes found; Requeue reconciliation") return ctrl.Result{Requeue: true}, nil @@ -971,8 +971,8 @@ func (r *OnlineUpgradeReconciler) copyRedirectStateToReplicaGroupB(ctx context.C r.Log.Error(err, "failed to gather podfacts for sandbox") return ctrl.Result{Requeue: true}, nil } - mainInitiator, mainOK := mainPFacts.FindFirstUpPod(false /*not allow read-only*/, "" /*arbitrary subcluster*/) - sbInitiator, sbOK := sbPFacts.FindFirstUpPod(false /*not allow read-only*/, "" /*arbitrary subcluster*/) + mainInitiator, mainOK := mainPFacts.FindFirstUpPod(false /* not allow read-only */, "" /* arbitrary subcluster */) + sbInitiator, sbOK := sbPFacts.FindFirstUpPod(false /* not allow read-only */, "" /* arbitrary subcluster */) if !mainOK || !sbOK { r.Log.Info("No Up nodes found; requeueing reconciliation") return ctrl.Result{Requeue: true}, nil @@ -1080,7 +1080,7 @@ func (r *OnlineUpgradeReconciler) redirectConnectionsToReplicaGroupB(ctx context // redirectConnectionsToSandbox will redirect all of the connections // established at replica group A to replica group B. 
func (r *OnlineUpgradeReconciler) redirectConnectionsToSandbox(ctx context.Context) (ctrl.Result, error) { - initiator, ok := r.PFacts[vapi.MainCluster].FindFirstUpPod(false /*not allow read-only*/, "" /*arbitrary subcluster*/) + initiator, ok := r.PFacts[vapi.MainCluster].FindFirstUpPod(false /* not allow read-only */, "" /* arbitrary subcluster */) if !ok { r.Log.Info("No Up nodes found; requeueing reconciliation") return ctrl.Result{Requeue: true}, nil @@ -1197,7 +1197,7 @@ func (r *OnlineUpgradeReconciler) deleteSandboxConfigMap(ctx context.Context) (c // We requeue if the sandbox still exists in the status return ctrl.Result{Requeue: true}, nil } - sbMan := MakeSandboxConfigMapManager(r.VRec, r.VDB, r.sandboxName, "" /*no uuid*/) + sbMan := MakeSandboxConfigMapManager(r.VRec, r.VDB, r.sandboxName, "" /* no uuid */) calledDelete, err := sbMan.deleteConfigMap(ctx) if !calledDelete { return ctrl.Result{}, err diff --git a/pkg/controllers/vdb/suite_test.go b/pkg/controllers/vdb/suite_test.go index 3759f700e..918431be1 100644 --- a/pkg/controllers/vdb/suite_test.go +++ b/pkg/controllers/vdb/suite_test.go @@ -41,6 +41,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) var k8sClient client.Client @@ -73,9 +74,12 @@ var _ = BeforeSuite(func() { k8sClient, err = client.New(restCfg, client.Options{Scheme: scheme.Scheme}) ExpectWithOffset(1, err).NotTo(HaveOccurred()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/controllers/vdb/verticadb_controller.go b/pkg/controllers/vdb/verticadb_controller.go index 6bb2db6ce..cdb126b50 100644 --- a/pkg/controllers/vdb/verticadb_controller.go +++ b/pkg/controllers/vdb/verticadb_controller.go @@ -74,6 +74,8 @@ type VerticaDBReconciler struct { // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch // SetupWithManager sets up the controller with the Manager. +// +//nolint:gocritic func (r *VerticaDBReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(options). diff --git a/pkg/controllers/vrep/suite_test.go b/pkg/controllers/vrep/suite_test.go index 7584f7eb9..4a4e21523 100644 --- a/pkg/controllers/vrep/suite_test.go +++ b/pkg/controllers/vrep/suite_test.go @@ -42,7 +42,8 @@ import ( v1 "github.com/vertica/vertica-kubernetes/api/v1" "github.com/vertica/vertica-kubernetes/api/v1beta1" - //+kubebuilder:scaffold:imports + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -79,15 +80,18 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) err = v1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) vrepRec = &VerticaReplicatorReconciler{ diff --git a/pkg/controllers/vrep/verticareplicator_controller.go b/pkg/controllers/vrep/verticareplicator_controller.go index a1e7a89b2..70e97dca9 100644 --- a/pkg/controllers/vrep/verticareplicator_controller.go +++ b/pkg/controllers/vrep/verticareplicator_controller.go @@ -46,9 +46,9 @@ type VerticaReplicatorReconciler struct { Concurrency int } -//+kubebuilder:rbac:groups=vertica.com,resources=verticareplicators,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=vertica.com,resources=verticareplicators/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=vertica.com,resources=verticareplicators/finalizers,verbs=update +// +kubebuilder:rbac:groups=vertica.com,resources=verticareplicators,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=vertica.com,resources=verticareplicators/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=vertica.com,resources=verticareplicators/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. diff --git a/pkg/controllers/vrpq/query_reconciler.go b/pkg/controllers/vrpq/query_reconciler.go index 19134ab27..9b37171bd 100644 --- a/pkg/controllers/vrpq/query_reconciler.go +++ b/pkg/controllers/vrpq/query_reconciler.go @@ -87,7 +87,7 @@ func (q *QueryReconciler) Reconcile(ctx context.Context, _ *ctrl.Request) (ctrl. } // setup dispatcher for vclusterops API - dispatcher, err := q.makeDispatcher(q.Log, q.Vdb, nil /*password*/) + dispatcher, err := q.makeDispatcher(q.Log, q.Vdb, nil /* password */) if err != nil { return ctrl.Result{}, err } diff --git a/pkg/controllers/vrpq/suite_test.go b/pkg/controllers/vrpq/suite_test.go index f6739ec3b..90a3e1a93 100644 --- a/pkg/controllers/vrpq/suite_test.go +++ b/pkg/controllers/vrpq/suite_test.go @@ -41,7 +41,8 @@ import ( v1 "github.com/vertica/vertica-kubernetes/api/v1" "github.com/vertica/vertica-kubernetes/api/v1beta1" - //+kubebuilder:scaffold:imports + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -79,15 +80,18 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) err = v1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) vrpqRec = &VerticaRestorePointsQueryReconciler{ diff --git a/pkg/controllers/vrpq/verticarestorepointsquery_controller.go b/pkg/controllers/vrpq/verticarestorepointsquery_controller.go index f650e332b..41bbdfa41 100644 --- a/pkg/controllers/vrpq/verticarestorepointsquery_controller.go +++ b/pkg/controllers/vrpq/verticarestorepointsquery_controller.go @@ -45,9 +45,9 @@ type VerticaRestorePointsQueryReconciler struct { EVRec record.EventRecorder } -//+kubebuilder:rbac:groups=vertica.com,resources=verticarestorepointsqueries,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=vertica.com,resources=verticarestorepointsqueries/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=vertica.com,resources=verticarestorepointsqueries/finalizers,verbs=update +// +kubebuilder:rbac:groups=vertica.com,resources=verticarestorepointsqueries,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=vertica.com,resources=verticarestorepointsqueries/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=vertica.com,resources=verticarestorepointsqueries/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. diff --git a/pkg/controllers/vscr/suite_test.go b/pkg/controllers/vscr/suite_test.go index 5b1a8d714..00db0ba63 100644 --- a/pkg/controllers/vscr/suite_test.go +++ b/pkg/controllers/vscr/suite_test.go @@ -37,7 +37,8 @@ import ( v1 "github.com/vertica/vertica-kubernetes/api/v1" "github.com/vertica/vertica-kubernetes/api/v1beta1" - //+kubebuilder:scaffold:imports + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -75,15 +76,18 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) err = v1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - //+kubebuilder:scaffold:scheme + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) vscrRec = &VerticaScrutinizeReconciler{ diff --git a/pkg/controllers/vscr/verticascrutinize_controller.go b/pkg/controllers/vscr/verticascrutinize_controller.go index 27ebb3ef7..2c64d0baf 100644 --- a/pkg/controllers/vscr/verticascrutinize_controller.go +++ b/pkg/controllers/vscr/verticascrutinize_controller.go @@ -33,7 +33,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/go-logr/logr" v1 "github.com/vertica/vertica-kubernetes/api/v1" @@ -57,10 +56,10 @@ const ( vdbNameField = ".spec.verticaDBName" ) -//+kubebuilder:rbac:groups=vertica.com,resources=verticascrutinizers,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=vertica.com,resources=verticascrutinizers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=vertica.com,resources=verticascrutinizers/finalizers,verbs=update -//+kubebuilder:rbac:groups=vertica.com,resources=verticadbs,verbs=get;list;watch +// +kubebuilder:rbac:groups=vertica.com,resources=verticascrutinizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=vertica.com,resources=verticascrutinizers/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=vertica.com,resources=verticascrutinizers/finalizers,verbs=update +// +kubebuilder:rbac:groups=vertica.com,resources=verticadbs,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -121,7 +120,7 @@ func (r *VerticaScrutinizeReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&v1beta1.VerticaScrutinize{}). Owns(&corev1.Pod{}). Watches( - &source.Kind{Type: &v1.VerticaDB{}}, + &v1.VerticaDB{}, handler.EnqueueRequestsFromMapFunc(r.findObjectsForVerticaDB), builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), ). @@ -130,7 +129,7 @@ func (r *VerticaScrutinizeReconciler) SetupWithManager(mgr ctrl.Manager) error { // findObjectsForVerticaDB will generate requests to reconcile VerticaScrutiners // based on watched VerticaDB. 
-func (r *VerticaScrutinizeReconciler) findObjectsForVerticaDB(vdb client.Object) []reconcile.Request { +func (r *VerticaScrutinizeReconciler) findObjectsForVerticaDB(_ context.Context, vdb client.Object) []reconcile.Request { scrutinizers := &v1beta1.VerticaScrutinizeList{} listOps := &client.ListOptions{ FieldSelector: fields.OneTermEqualSelector(vdbNameField, vdb.GetName()), diff --git a/pkg/podfacts/suite_test.go b/pkg/podfacts/suite_test.go index e0c155bea..f7eb4f236 100644 --- a/pkg/podfacts/suite_test.go +++ b/pkg/podfacts/suite_test.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) var k8sClient client.Client @@ -111,9 +112,12 @@ var _ = BeforeSuite(func() { k8sClient, err = client.New(restCfg, client.Options{Scheme: scheme.Scheme}) ExpectWithOffset(1, err).NotTo(HaveOccurred()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/vdbconfig/suite_test.go b/pkg/vdbconfig/suite_test.go index 06c7224e7..b107f20b5 100644 --- a/pkg/vdbconfig/suite_test.go +++ b/pkg/vdbconfig/suite_test.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) var k8sClient client.Client @@ -60,9 +61,12 @@ var _ = BeforeSuite(func() { k8sClient, err = client.New(restCfg, client.Options{Scheme: scheme.Scheme}) ExpectWithOffset(1, err).NotTo(HaveOccurred()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/vk8s/suite_test.go b/pkg/vk8s/suite_test.go index 109fa6a88..3b3697a3e 100644 --- a/pkg/vk8s/suite_test.go +++ b/pkg/vk8s/suite_test.go @@ -31,6 +31,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) var k8sClient client.Client @@ -60,9 +61,12 @@ var _ = BeforeSuite(func() { k8sClient, err = client.New(restCfg, client.Options{Scheme: scheme.Scheme}) ExpectWithOffset(1, err).NotTo(HaveOccurred()) + metricsServerOptions := metricsserver.Options{ + BindAddress: "0", // Disable metrics for the test + } mgr, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - MetricsBindAddress: "0", // Disable metrics for the test + Scheme: scheme.Scheme, + Metrics: metricsServerOptions, }) ExpectWithOffset(1, err).NotTo(HaveOccurred()) }) diff --git a/scripts/authorize-metrics.sh b/scripts/authorize-metrics.sh index 0325a9ca3..de71549e5 100755 --- a/scripts/authorize-metrics.sh +++ b/scripts/authorize-metrics.sh @@ -63,9 +63,6 @@ set -o xtrace if [[ -n "$UNDO" ]] then kubectl delete -f $REPO_DIR/config/release-manifests/verticadb-operator-metrics-reader-cr.yaml || : - kubectl delete -f 
$REPO_DIR/config/release-manifests/verticadb-operator-proxy-role-cr.yaml || : - kubectl delete -f $REPO_DIR/config/release-manifests/verticadb-operator-metrics-reader-crb.yaml || : - kubectl delete -f $REPO_DIR/config/release-manifests/verticadb-operator-proxy-rolebinding-crb.yaml || : echo "Finished undoing action" exit 0 fi diff --git a/scripts/gen-release-artifacts.sh b/scripts/gen-release-artifacts.sh index 7fc47abba..542f49bcb 100755 --- a/scripts/gen-release-artifacts.sh +++ b/scripts/gen-release-artifacts.sh @@ -53,10 +53,7 @@ fi RELEASE_ARTIFACT_TARGET_DIR=$REPO_DIR/config/release-manifests mkdir -p $RELEASE_ARTIFACT_TARGET_DIR for f in verticadb-operator-metrics-monitor-servicemonitor.yaml \ - verticadb-operator-proxy-rolebinding-crb.yaml \ - verticadb-operator-proxy-role-cr.yaml \ - verticadb-operator-metrics-reader-cr.yaml \ - verticadb-operator-metrics-reader-crb.yaml + verticadb-operator-metrics-reader-cr.yaml do cp $MANIFEST_DIR/$f $RELEASE_ARTIFACT_TARGET_DIR # Modify the artifact we are copying over by removing any namespace field. diff --git a/scripts/template-helm-chart.sh b/scripts/template-helm-chart.sh index 178089c63..adad9cdf5 100755 --- a/scripts/template-helm-chart.sh +++ b/scripts/template-helm-chart.sh @@ -41,7 +41,6 @@ fi perl -i -0777 -pe 's/verticadb-operator-system/{{ .Release.Namespace }}/g' $TEMPLATE_DIR/* # 2. Template image names perl -i -0777 -pe "s|image: controller|image: '{{ with .Values.image }}{{ join \"/\" (list .repo .name) }}{{ end }}'|" $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml -perl -i -0777 -pe "s|image: gcr.io/kubebuilder/kube-rbac-proxy:v.*|image: '{{ with .Values.rbac_proxy_image }}{{ join \"/\" (list .repo .name) }}{{ end }}'|" $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml # 3. Template imagePullPolicy perl -i -0777 -pe 's/imagePullPolicy: IfNotPresent/imagePullPolicy: {{ default "IfNotPresent" .Values.image.pullPolicy }}/' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml # 4. Append imagePullSecrets @@ -93,8 +92,6 @@ cat << EOF >> $TEMPLATE_DIR/verticadb-operator-manager-sa.yaml EOF for f in \ verticadb-operator-leader-election-rolebinding-rb.yaml \ - verticadb-operator-proxy-rolebinding-crb.yaml \ - verticadb-operator-metrics-reader-crb.yaml \ verticadb-operator-manager-clusterrolebinding-crb.yaml \ verticadb-operator-webhook-config-crb.yaml do @@ -117,15 +114,8 @@ do echo "{{- end }}" >> $f done -# 11. Template the prometheus metrics service -perl -i -pe 's/^/{{- if hasPrefix "Enable" .Values.prometheus.expose -}}\n/ if 1 .. 1' $TEMPLATE_DIR/verticadb-operator-metrics-service-svc.yaml -echo "{{- end }}" >> $TEMPLATE_DIR/verticadb-operator-metrics-service-svc.yaml - -# 12. Template the roles/rolebindings for access to the rbac proxy -for f in verticadb-operator-proxy-rolebinding-crb.yaml \ - verticadb-operator-proxy-role-cr.yaml \ - verticadb-operator-metrics-reader-cr.yaml \ - verticadb-operator-metrics-reader-crb.yaml +# 11. Template the roles/rolebindings for access to the rbac proxy +for f in verticadb-operator-metrics-reader-cr.yaml do perl -i -pe 's/^/{{- if and (.Values.prometheus.createProxyRBAC) (eq .Values.prometheus.expose "EnableWithAuthProxy") -}}\n/ if 1 .. 1' $TEMPLATE_DIR/$f echo "{{- end }}" >> $TEMPLATE_DIR/$f @@ -133,36 +123,35 @@ do perl -i -0777 -pe 's/-(proxy-role.*)/-{{ include "vdb-op.metricsRbacPrefix" . }}$1/g' $TEMPLATE_DIR/$f done -# 13. Template the ServiceMonitor object for Promtheus operator +# 12. 
Template the ServiceMonitor object for Promtheus operator perl -i -pe 's/^/{{- if .Values.prometheus.createServiceMonitor -}}\n/ if 1 .. 1' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml echo "{{- end }}" >> $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml perl -i -0777 -pe 's/(.*endpoints:)/$1\n{{- if eq "EnableWithAuthProxy" .Values.prometheus.expose }}/g' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml perl -i -0777 -pe 's/(.*insecureSkipVerify:.*)/$1\n{{- else }}\n - path: \/metrics\n port: metrics\n scheme: http\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml -# 14. Template the metrics bind address +# 13. Template the metrics bind address perl -i -0777 -pe 's/(METRICS_ADDR: )(.*)/$1 "{{ if eq "EnableWithAuthProxy" .Values.prometheus.expose }}127.0.0.1{{ end }}:{{ if eq "EnableWithAuthProxy" .Values.prometheus.expose }}8080{{ else }}8443{{ end }}"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml perl -i -0777 -pe 's/(.*METRICS_ADDR:.*)/{{- if hasPrefix "Enable" .Values.prometheus.expose }}\n$1\n{{- else }}\n METRICS_ADDR: ""\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml perl -i -0777 -pe 's/(.*ports:\n.*containerPort: 9443\n.*webhook-server.*\n.*)/$1\n{{- if hasPrefix "EnableWithoutAuth" .Values.prometheus.expose }}\n - name: metrics\n containerPort: 8443\n protocol: TCP\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml -# 15. Template the rbac container +# 14. Template the rbac container perl -i -0777 -pe 's/(.*- args:.*\n.*secure)/{{- if eq .Values.prometheus.expose "EnableWithAuthProxy" }}\n$1/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml # We need to put the matching end at the end of the container spec. perl -i -0777 -pe 's/(memory: 64Mi)/$1\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml -# 16. Template places that refer to objects by name. Do this in all files. +# 15. Template places that refer to objects by name. Do this in all files. # In the config/ directory we hardcoded everything to start with # verticadb-operator. perl -i -0777 -pe 's/verticadb-operator/{{ include "vdb-op.name" . }}/g' $TEMPLATE_DIR/*yaml -# 17. Mount TLS certs in the rbac proxy +# 16. Mount TLS certs in the rbac proxy for f in $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml do perl -i -0777 -pe 's/(.*--v=[0-9]+)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - --tls-cert-file=\/cert\/tls.crt\n - --tls-private-key-file=\/cert\/tls.key\n - --client-ca-file=\/cert\/ca.crt\n{{- end }}/g' $f perl -i -0777 -pe 's/(volumes:)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - name: auth-cert\n secret:\n secretName: {{ .Values.prometheus.tlsSecret }}\n{{- end }}/g' $f - perl -i -0777 -pe 's/(name: kube-rbac-proxy)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n volumeMounts:\n - mountPath: \/cert\n name: auth-cert\n{{- end }}/g' $f done -# 18. Add pod scheduling options +# 17. Add pod scheduling options cat << EOF >> $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml {{- if .Values.nodeSelector }} nodeSelector: @@ -181,7 +170,7 @@ cat << EOF >> $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml {{- end }} EOF -# 19. Template the per-CR concurrency parameters +# 18. 
Template the per-CR concurrency parameters for f in $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml do perl -i -0777 -pe 's/(CONCURRENCY_VERTICADB: ).*/$1\{\{ .Values.reconcileConcurrency.verticadb | quote \}\}/g' $f @@ -193,7 +182,7 @@ do perl -i -0777 -pe 's/(CONCURRENCY_VERTICAREPLICATOR: ).*/$1\{\{ .Values.reconcileConcurrency.verticareplicator | quote \}\}/g' $f done -# 20. Add permissions to manager ClusterRole to allow it to patch the CRD. This +# 19. Add permissions to manager ClusterRole to allow it to patch the CRD. This # is only needed if the webhook cert is generated by the operator or provided # by a Secret. cat << EOF >> $TEMPLATE_DIR/verticadb-operator-webhook-config-cr.yaml @@ -210,7 +199,7 @@ cat << EOF >> $TEMPLATE_DIR/verticadb-operator-webhook-config-cr.yaml {{- end }} EOF -# 21. Change change ClusterRoles/ClusterRoleBindings for the manager to be +# 20. Change change ClusterRoles/ClusterRoleBindings for the manager to be # Roles/RoleBindings if the operator is scoped to a single namespace. for f in $TEMPLATE_DIR/verticadb-operator-manager-clusterrolebinding-crb.yaml \ $TEMPLATE_DIR/verticadb-operator-manager-role-cr.yaml @@ -221,7 +210,7 @@ do echo "{{- end }}" >> $f done -# 22. Template the operator config +# 21. Template the operator config for fn in $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml do perl -i -0777 -pe 's/(WEBHOOKS_ENABLED:).*/$1 {{ quote .Values.webhook.enable }}/g' $fn diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/05-assert.yaml b/tests/e2e-leg-5/metrics-auth-proxy-cert/05-assert.yaml index 58f1560c0..1b91a2e25 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-cert/05-assert.yaml +++ b/tests/e2e-leg-5/metrics-auth-proxy-cert/05-assert.yaml @@ -19,7 +19,6 @@ metadata: spec: containers: - name: manager - - name: kube-rbac-proxy status: phase: Running --- diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-assert.yaml b/tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-assert.yaml index e21af97e3..9fb612105 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-assert.yaml +++ b/tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-assert.yaml @@ -19,7 +19,6 @@ metadata: spec: containers: - name: manager - - name: kube-rbac-proxy status: phase: Running containerStatuses: diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-assert.yaml b/tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-assert.yaml index e21af97e3..9fb612105 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-assert.yaml +++ b/tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-assert.yaml @@ -19,7 +19,6 @@ metadata: spec: containers: - name: manager - - name: kube-rbac-proxy status: phase: Running containerStatuses: diff --git a/tests/e2e-leg-5/operator-pod-scheduling/10-assert.yaml b/tests/e2e-leg-5/operator-pod-scheduling/10-assert.yaml index b85b81828..a0ae96d81 100644 --- a/tests/e2e-leg-5/operator-pod-scheduling/10-assert.yaml +++ b/tests/e2e-leg-5/operator-pod-scheduling/10-assert.yaml @@ -19,7 +19,6 @@ metadata: spec: containers: - name: manager - - name: kube-rbac-proxy priority: 1000000 affinity: nodeAffinity: diff --git a/tests/e2e-leg-5/operator-pod-scheduling/20-assert.yaml b/tests/e2e-leg-5/operator-pod-scheduling/20-assert.yaml index d89eb9a37..cb25bdb07 100644 --- a/tests/e2e-leg-5/operator-pod-scheduling/20-assert.yaml +++ b/tests/e2e-leg-5/operator-pod-scheduling/20-assert.yaml @@ -19,7 +19,6 @@ metadata: spec: containers: - name: manager - - name: kube-rbac-proxy nodeSelector: madeUpLabel: unmatchedValue status: diff --git 
a/tests/e2e-operator-upgrade-template/from-1.2.0/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.2.0/15-assert.yaml index 56a82472d..4c356678e 100644 --- a/tests/e2e-operator-upgrade-template/from-1.2.0/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.2.0/15-assert.yaml @@ -20,6 +20,5 @@ spec: containers: - name: manager image: vertica/verticadb-operator:1.2.0 - - name: kube-rbac-proxy status: phase: Running diff --git a/tests/e2e-operator-upgrade-template/from-1.3.1/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.3.1/15-assert.yaml index 5ce70abee..1584c986a 100644 --- a/tests/e2e-operator-upgrade-template/from-1.3.1/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.3.1/15-assert.yaml @@ -20,6 +20,5 @@ spec: containers: - name: manager image: vertica/verticadb-operator:1.3.1 - - name: kube-rbac-proxy status: phase: Running diff --git a/tests/e2e-operator-upgrade-template/from-1.4.0/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.4.0/15-assert.yaml index 69a34d190..ffec13e44 100644 --- a/tests/e2e-operator-upgrade-template/from-1.4.0/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.4.0/15-assert.yaml @@ -20,6 +20,5 @@ spec: containers: - name: manager image: vertica/verticadb-operator:1.4.0 - - name: kube-rbac-proxy status: phase: Running diff --git a/tests/e2e-operator-upgrade-template/from-1.6.0/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.6.0/15-assert.yaml index 2377c9990..3f52d2724 100644 --- a/tests/e2e-operator-upgrade-template/from-1.6.0/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.6.0/15-assert.yaml @@ -20,6 +20,5 @@ spec: containers: - name: manager image: docker.io/vertica/verticadb-operator:1.6.0 - - name: kube-rbac-proxy status: phase: Running diff --git a/tests/e2e-operator-upgrade-template/from-1.7.0/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.7.0/15-assert.yaml index 29ef828aa..26c0d20d7 100644 --- a/tests/e2e-operator-upgrade-template/from-1.7.0/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.7.0/15-assert.yaml @@ -20,6 +20,5 @@ spec: containers: - name: manager image: docker.io/vertica/verticadb-operator:1.7.0 - - name: kube-rbac-proxy status: phase: Running diff --git a/tests/external-images-common-ci.txt b/tests/external-images-common-ci.txt index ca18a16de..966d58a29 100644 --- a/tests/external-images-common-ci.txt +++ b/tests/external-images-common-ci.txt @@ -9,7 +9,6 @@ amazon/aws-cli:2.2.24 quay.io/helmpack/chart-testing:v3.3.1 bitnami/kubectl:1.20.4 rancher/local-path-provisioner:v0.0.19 -gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 quay.io/jetstack/cert-manager-controller:v1.5.3 quay.io/jetstack/cert-manager-cainjector:v1.5.3 quay.io/jetstack/cert-manager-webhook:v1.5.3 diff --git a/tests/manifests/rbac/base/rbac.yaml b/tests/manifests/rbac/base/rbac.yaml index 2584edea3..2b7a287f8 100644 --- a/tests/manifests/rbac/base/rbac.yaml +++ b/tests/manifests/rbac/base/rbac.yaml @@ -30,6 +30,7 @@ rules: - list - create - delete + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding From d66b9f9ea52efacf0dc78a8d11ccf9702b0e5e92 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Wed, 22 Jan 2025 23:55:28 +0000 Subject: [PATCH 02/15] fix1 --- cmd/operator/main.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cmd/operator/main.go b/cmd/operator/main.go index e4f11cff0..f0e66c818 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -314,6 +314,11 @@ func main() { restCfg := 
ctrl.GetConfigOrDie() + var cacheNamespaces map[string]cache.Config + if opcfg.GetWatchNamespace() != "" { + cacheNamespaces := make(map[string]cache.Config) + cacheNamespaces[opcfg.GetWatchNamespace()] = cache.Config{} + } mgr, err := ctrl.NewManager(restCfg, ctrl.Options{ Scheme: scheme, Metrics: metricsServerOptions, @@ -321,10 +326,8 @@ func main() { HealthProbeBindAddress: ":8081", LeaderElection: true, LeaderElectionID: opcfg.GetLeaderElectionID(), - Cache: cache.Options{DefaultNamespaces: map[string]cache.Config{ - opcfg.GetWatchNamespace(): {}, - }}, - EventBroadcaster: multibroadcaster, + Cache: cache.Options{DefaultNamespaces: cacheNamespaces}, + EventBroadcaster: multibroadcaster, Controller: config.Controller{ GroupKindConcurrency: map[string]int{ vapiB1.GkVDB.String(): opcfg.GetVerticaDBConcurrency(), From 7445bc8c4bb33152cff0a8ade5018fe301aaf879 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Thu, 23 Jan 2025 19:16:52 +0000 Subject: [PATCH 03/15] fix2 --- cmd/operator/main.go | 7 ++++++- config/manager/operator-envs | 2 +- pkg/opcfg/config.go | 5 +++++ scripts/template-helm-chart.sh | 7 ++++++- 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/cmd/operator/main.go b/cmd/operator/main.go index f0e66c818..7bc0a5a4b 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -288,6 +288,10 @@ func main() { }) secureMetrics := strings.HasSuffix(opcfg.GetMetricsAddr(), "8443") + var metricCertDir string + if opcfg.GetMetricsTLSSecret() != "" { + metricCertDir = "/cert" + } // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. // More info: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/server @@ -302,6 +306,7 @@ func main() { // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName // to provide certificates, ensuring the server communicates using trusted and secure certificates. TLSOpts: metricsTLSOpts, + CertDir: metricCertDir, } if secureMetrics { @@ -316,7 +321,7 @@ func main() { var cacheNamespaces map[string]cache.Config if opcfg.GetWatchNamespace() != "" { - cacheNamespaces := make(map[string]cache.Config) + cacheNamespaces = make(map[string]cache.Config) cacheNamespaces[opcfg.GetWatchNamespace()] = cache.Config{} } mgr, err := ctrl.NewManager(restCfg, ctrl.Options{ diff --git a/config/manager/operator-envs b/config/manager/operator-envs index bb7f84cfd..dccb580ab 100644 --- a/config/manager/operator-envs +++ b/config/manager/operator-envs @@ -6,7 +6,7 @@ WEBHOOKS_ENABLED CONTROLLERS_ENABLED CONTROLLERS_SCOPE METRICS_ADDR -METRICS_TLS +METRICS_TLS_SECRET METRICS_PROXY_RBAC LOG_LEVEL CONCURRENCY_VERTICADB diff --git a/pkg/opcfg/config.go b/pkg/opcfg/config.go index d78854d08..4a1c4ce72 100644 --- a/pkg/opcfg/config.go +++ b/pkg/opcfg/config.go @@ -91,6 +91,11 @@ func GetMetricsAddr() string { return lookupStringEnvVar("METRICS_ADDR", envCanNotExist) } +// GetMetricsTLSSecret returns TLS secret name of the manager's Prometheus endpoint. +func GetMetricsTLSSecret() string { + return lookupStringEnvVar("METRICS_TLS_SECRET", envCanNotExist) +} + // GetUseCertManager returns true if cert-manager is used to setup the webhook's // TLS certs. func GetUseCertManager() bool { diff --git a/scripts/template-helm-chart.sh b/scripts/template-helm-chart.sh index adad9cdf5..d5e41ff0e 100755 --- a/scripts/template-helm-chart.sh +++ b/scripts/template-helm-chart.sh @@ -114,6 +114,10 @@ do echo "{{- end }}" >> $f done +# 11. 
Template the prometheus metrics service +perl -i -pe 's/^/{{- if hasPrefix "Enable" .Values.prometheus.expose -}}\n/ if 1 .. 1' $TEMPLATE_DIR/verticadb-operator-controller-manager-metrics-service-svc.yaml +echo "{{- end }}" >> $TEMPLATE_DIR/verticadb-operator-controller-manager-metrics-service-svc.yaml + # 11. Template the roles/rolebindings for access to the rbac proxy for f in verticadb-operator-metrics-reader-cr.yaml do @@ -130,8 +134,9 @@ perl -i -0777 -pe 's/(.*endpoints:)/$1\n{{- if eq "EnableWithAuthProxy" .Values. perl -i -0777 -pe 's/(.*insecureSkipVerify:.*)/$1\n{{- else }}\n - path: \/metrics\n port: metrics\n scheme: http\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml # 13. Template the metrics bind address +perl -i -0777 -pe 's/(METRICS_TLS_SECRET: )(.*)/$1 "{{ .Values.prometheus.tlsSecret }}"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml perl -i -0777 -pe 's/(METRICS_ADDR: )(.*)/$1 "{{ if eq "EnableWithAuthProxy" .Values.prometheus.expose }}127.0.0.1{{ end }}:{{ if eq "EnableWithAuthProxy" .Values.prometheus.expose }}8080{{ else }}8443{{ end }}"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml -perl -i -0777 -pe 's/(.*METRICS_ADDR:.*)/{{- if hasPrefix "Enable" .Values.prometheus.expose }}\n$1\n{{- else }}\n METRICS_ADDR: ""\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml +perl -i -0777 -pe 's/(.*METRICS_ADDR:.*)/{{- if hasPrefix "Enable" .Values.prometheus.expose }}\n$1\n{{- else }}\n METRICS_ADDR: "0"\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml perl -i -0777 -pe 's/(.*ports:\n.*containerPort: 9443\n.*webhook-server.*\n.*)/$1\n{{- if hasPrefix "EnableWithoutAuth" .Values.prometheus.expose }}\n - name: metrics\n containerPort: 8443\n protocol: TCP\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml # 14. Template the rbac container From cb5192d7159696e2c343736ea0f0839a0420d5e9 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Thu, 23 Jan 2025 20:59:49 +0000 Subject: [PATCH 04/15] fix3 --- DEVELOPER.md | 2 +- Makefile | 2 +- cmd/operator/main.go | 3 +- config/default/metrics_service.yaml | 6 ++-- .../tests/metrics-configmap_test.yaml | 6 ++-- scripts/template-helm-chart.sh | 30 +++++++++---------- 6 files changed, 24 insertions(+), 25 deletions(-) diff --git a/DEVELOPER.md b/DEVELOPER.md index 9553d9244..2d0bac6a8 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -686,7 +686,7 @@ kubectl logs -c vlogger ... - args: - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 + - --metrics-bind-address=127.0.0.1:8443 - --leader-elect - --health-probe-bind-address=:8081 - --enable-profiler diff --git a/Makefile b/Makefile index 740cd8a96..8fb1e7ddc 100644 --- a/Makefile +++ b/Makefile @@ -231,7 +231,7 @@ export VDB_MAX_BACKOFF_DURATION # # The address the operators Prometheus metrics endpoint binds to. Setting this # to 0 will disable metric serving. -METRICS_ADDR?=127.0.0.1:8080 +METRICS_ADDR?=127.0.0.1:8443 export METRICS_ADDR # # The minimum logging level. Valid values are: debug, info, warn, and error. 
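The cmd/operator/main.go changes that follow replace the removed `MetricsBindAddress` manager field with `metricsserver.Options` and express the optional watch namespace through `cache.Options`. A minimal sketch of that wiring, assuming controller-runtime v0.18-style options; `WATCH_NAMESPACE` and `METRICS_CERT_DIR` are illustrative environment variables rather than this operator's actual configuration.

```go
package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func main() {
	watchNamespace := os.Getenv("WATCH_NAMESPACE") // empty means cluster-wide
	certDir := os.Getenv("METRICS_CERT_DIR")       // empty means no mounted cert

	metricsOpts := metricsserver.Options{
		BindAddress:   ":8443",
		SecureServing: true,
		// When CertDir is empty, controller-runtime falls back to a
		// self-signed certificate for the metrics endpoint.
		CertDir: certDir,
	}

	// Only restrict the cache when a namespace was requested; leaving
	// DefaultNamespaces nil keeps the manager cluster-scoped.
	var cacheNamespaces map[string]cache.Config
	if watchNamespace != "" {
		cacheNamespaces = map[string]cache.Config{watchNamespace: {}}
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Metrics: metricsOpts,
		Cache:   cache.Options{DefaultNamespaces: cacheNamespaces},
	})
	if err != nil {
		os.Exit(1)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}
```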
diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 7bc0a5a4b..58da32302 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -20,7 +20,6 @@ import ( "crypto/tls" "log" "os" - "strings" "time" // Allows us to pull in things generated from `go generate` @@ -287,7 +286,7 @@ func main() { TLSOpts: webhookTLSOpts, }) - secureMetrics := strings.HasSuffix(opcfg.GetMetricsAddr(), "8443") + secureMetrics := opcfg.GetMetricsAddr() == "127.0.0.1:8443" var metricCertDir string if opcfg.GetMetricsTLSSecret() != "" { metricCertDir = "/cert" diff --git a/config/default/metrics_service.yaml b/config/default/metrics_service.yaml index ca644dfe3..309d1802e 100644 --- a/config/default/metrics_service.yaml +++ b/config/default/metrics_service.yaml @@ -2,10 +2,10 @@ apiVersion: v1 kind: Service metadata: labels: - control-plane: controller-manager + control-plane: verticadb-operator app.kubernetes.io/name: verticadb-operator app.kubernetes.io/managed-by: kustomize - name: controller-manager-metrics-service + name: metrics-service namespace: system spec: ports: @@ -14,4 +14,4 @@ spec: protocol: TCP targetPort: 8443 selector: - control-plane: controller-manager \ No newline at end of file + control-plane: verticadb-operator \ No newline at end of file diff --git a/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml b/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml index 1ba3bdb3a..81b626737 100644 --- a/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml +++ b/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml @@ -2,7 +2,7 @@ suite: Metrics configmap tests templates: - verticadb-operator-manager-config-cm.yaml tests: - - it: should not include proxy sidecar if expose is without auth + - it: should not contain ip if expose is without auth set: prometheus: expose: EnableWithoutAuth @@ -10,11 +10,11 @@ tests: - equal: path: data.METRICS_ADDR value: :8443 - - it: should include proxy sidecar if expose is with auth + - it: should cotain ip if expose is with auth set: prometheus: expose: EnableWithAuthProxy asserts: - equal: path: data.METRICS_ADDR - value: 127.0.0.1:8080 + value: 127.0.0.1:8443 diff --git a/scripts/template-helm-chart.sh b/scripts/template-helm-chart.sh index d5e41ff0e..05e2f51f8 100755 --- a/scripts/template-helm-chart.sh +++ b/scripts/template-helm-chart.sh @@ -115,10 +115,10 @@ do done # 11. Template the prometheus metrics service -perl -i -pe 's/^/{{- if hasPrefix "Enable" .Values.prometheus.expose -}}\n/ if 1 .. 1' $TEMPLATE_DIR/verticadb-operator-controller-manager-metrics-service-svc.yaml -echo "{{- end }}" >> $TEMPLATE_DIR/verticadb-operator-controller-manager-metrics-service-svc.yaml +perl -i -pe 's/^/{{- if hasPrefix "Enable" .Values.prometheus.expose -}}\n/ if 1 .. 1' $TEMPLATE_DIR/verticadb-operator-metrics-service-svc.yaml +echo "{{- end }}" >> $TEMPLATE_DIR/verticadb-operator-metrics-service-svc.yaml -# 11. Template the roles/rolebindings for access to the rbac proxy +# 12. Template the roles/rolebindings for access to prometheus metrics for f in verticadb-operator-metrics-reader-cr.yaml do perl -i -pe 's/^/{{- if and (.Values.prometheus.createProxyRBAC) (eq .Values.prometheus.expose "EnableWithAuthProxy") -}}\n/ if 1 .. 1' $TEMPLATE_DIR/$f @@ -127,36 +127,36 @@ do perl -i -0777 -pe 's/-(proxy-role.*)/-{{ include "vdb-op.metricsRbacPrefix" . }}$1/g' $TEMPLATE_DIR/$f done -# 12. Template the ServiceMonitor object for Promtheus operator +# 13. 
Template the ServiceMonitor object for Promtheus operator perl -i -pe 's/^/{{- if .Values.prometheus.createServiceMonitor -}}\n/ if 1 .. 1' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml echo "{{- end }}" >> $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml perl -i -0777 -pe 's/(.*endpoints:)/$1\n{{- if eq "EnableWithAuthProxy" .Values.prometheus.expose }}/g' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml perl -i -0777 -pe 's/(.*insecureSkipVerify:.*)/$1\n{{- else }}\n - path: \/metrics\n port: metrics\n scheme: http\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml -# 13. Template the metrics bind address +# 14. Template the metrics bind address perl -i -0777 -pe 's/(METRICS_TLS_SECRET: )(.*)/$1 "{{ .Values.prometheus.tlsSecret }}"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml -perl -i -0777 -pe 's/(METRICS_ADDR: )(.*)/$1 "{{ if eq "EnableWithAuthProxy" .Values.prometheus.expose }}127.0.0.1{{ end }}:{{ if eq "EnableWithAuthProxy" .Values.prometheus.expose }}8080{{ else }}8443{{ end }}"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml +perl -i -0777 -pe 's/(METRICS_ADDR: )(.*)/$1 "{{ if eq "EnableWithAuthProxy" .Values.prometheus.expose }}127.0.0.1{{ end }}:8443"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml perl -i -0777 -pe 's/(.*METRICS_ADDR:.*)/{{- if hasPrefix "Enable" .Values.prometheus.expose }}\n$1\n{{- else }}\n METRICS_ADDR: "0"\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml -perl -i -0777 -pe 's/(.*ports:\n.*containerPort: 9443\n.*webhook-server.*\n.*)/$1\n{{- if hasPrefix "EnableWithoutAuth" .Values.prometheus.expose }}\n - name: metrics\n containerPort: 8443\n protocol: TCP\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml +perl -i -0777 -pe 's/(.*ports:\n.*containerPort: 9443\n.*webhook-server.*\n.*)/$1\n{{- if hasPrefix "Enable" .Values.prometheus.expose }}\n - name: metrics\n containerPort: 8443\n protocol: TCP\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml -# 14. Template the rbac container +# 15. Template the rbac container perl -i -0777 -pe 's/(.*- args:.*\n.*secure)/{{- if eq .Values.prometheus.expose "EnableWithAuthProxy" }}\n$1/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml # We need to put the matching end at the end of the container spec. perl -i -0777 -pe 's/(memory: 64Mi)/$1\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml -# 15. Template places that refer to objects by name. Do this in all files. +# 16. Template places that refer to objects by name. Do this in all files. # In the config/ directory we hardcoded everything to start with # verticadb-operator. perl -i -0777 -pe 's/verticadb-operator/{{ include "vdb-op.name" . }}/g' $TEMPLATE_DIR/*yaml -# 16. Mount TLS certs in the rbac proxy +# 17. Mount TLS certs for prometheus metrics for f in $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml do perl -i -0777 -pe 's/(.*--v=[0-9]+)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - --tls-cert-file=\/cert\/tls.crt\n - --tls-private-key-file=\/cert\/tls.key\n - --client-ca-file=\/cert\/ca.crt\n{{- end }}/g' $f perl -i -0777 -pe 's/(volumes:)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - name: auth-cert\n secret:\n secretName: {{ .Values.prometheus.tlsSecret }}\n{{- end }}/g' $f done -# 17. Add pod scheduling options +# 18. 
Add pod scheduling options cat << EOF >> $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml {{- if .Values.nodeSelector }} nodeSelector: @@ -175,7 +175,7 @@ cat << EOF >> $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml {{- end }} EOF -# 18. Template the per-CR concurrency parameters +# 19. Template the per-CR concurrency parameters for f in $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml do perl -i -0777 -pe 's/(CONCURRENCY_VERTICADB: ).*/$1\{\{ .Values.reconcileConcurrency.verticadb | quote \}\}/g' $f @@ -187,7 +187,7 @@ do perl -i -0777 -pe 's/(CONCURRENCY_VERTICAREPLICATOR: ).*/$1\{\{ .Values.reconcileConcurrency.verticareplicator | quote \}\}/g' $f done -# 19. Add permissions to manager ClusterRole to allow it to patch the CRD. This +# 20. Add permissions to manager ClusterRole to allow it to patch the CRD. This # is only needed if the webhook cert is generated by the operator or provided # by a Secret. cat << EOF >> $TEMPLATE_DIR/verticadb-operator-webhook-config-cr.yaml @@ -204,7 +204,7 @@ cat << EOF >> $TEMPLATE_DIR/verticadb-operator-webhook-config-cr.yaml {{- end }} EOF -# 20. Change change ClusterRoles/ClusterRoleBindings for the manager to be +# 21. Change change ClusterRoles/ClusterRoleBindings for the manager to be # Roles/RoleBindings if the operator is scoped to a single namespace. for f in $TEMPLATE_DIR/verticadb-operator-manager-clusterrolebinding-crb.yaml \ $TEMPLATE_DIR/verticadb-operator-manager-role-cr.yaml @@ -215,7 +215,7 @@ do echo "{{- end }}" >> $f done -# 21. Template the operator config +# 22. Template the operator config for fn in $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml do perl -i -0777 -pe 's/(WEBHOOKS_ENABLED:).*/$1 {{ quote .Values.webhook.enable }}/g' $fn From f22c54a279f3cd9062293fa64dad60ec9cdd479d Mon Sep 17 00:00:00 2001 From: cchen-vertica Date: Thu, 23 Jan 2025 18:05:13 -0500 Subject: [PATCH 05/15] fix4 --- cmd/operator/main.go | 3 ++- config/default/manager_metrics_patch.yaml | 7 ++++++- scripts/template-helm-chart.sh | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 58da32302..6159f8290 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -291,12 +291,13 @@ func main() { if opcfg.GetMetricsTLSSecret() != "" { metricCertDir = "/cert" } + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. // More info: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/server // - https://book.kubebuilder.io/reference/metrics.html metricsServerOptions := metricsserver.Options{ - BindAddress: opcfg.GetMetricsAddr(), + BindAddress: ":8443", SecureServing: secureMetrics, // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are // not provided, self-signed certificates will be generated by default. 
This option is not recommended for diff --git a/config/default/manager_metrics_patch.yaml b/config/default/manager_metrics_patch.yaml index 488f13693..239dedeb8 100644 --- a/config/default/manager_metrics_patch.yaml +++ b/config/default/manager_metrics_patch.yaml @@ -1,4 +1,9 @@ # This patch adds the args to allow exposing the metrics endpoint using HTTPS - op: add path: /spec/template/spec/containers/0/args/0 - value: --metrics-bind-address=:8443 \ No newline at end of file + value: --metrics-bind-address=:8443 +- op: add + path: /spec/template/spec/containers/0/volumeMounts/1 + value: + mountPath: /cert + name: auth-cert \ No newline at end of file diff --git a/scripts/template-helm-chart.sh b/scripts/template-helm-chart.sh index 05e2f51f8..717682168 100755 --- a/scripts/template-helm-chart.sh +++ b/scripts/template-helm-chart.sh @@ -154,6 +154,7 @@ for f in $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml do perl -i -0777 -pe 's/(.*--v=[0-9]+)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - --tls-cert-file=\/cert\/tls.crt\n - --tls-private-key-file=\/cert\/tls.key\n - --client-ca-file=\/cert\/ca.crt\n{{- end }}/g' $f perl -i -0777 -pe 's/(volumes:)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - name: auth-cert\n secret:\n secretName: {{ .Values.prometheus.tlsSecret }}\n{{- end }}/g' $f + perl -i -0777 -pe 's/(.*- mountPath: .*\n.*name: auth-cert.*)/\{\{- if not (empty .Values.prometheus.tlsSecret) }}\n - mountPath: \/cert\n name: auth-cert\n{{- end }}/g' $f done # 18. Add pod scheduling options From 7af03ba090984a08542cf4d541046f68c3c1e352 Mon Sep 17 00:00:00 2001 From: cchen-vertica Date: Fri, 24 Jan 2025 11:46:48 -0500 Subject: [PATCH 06/15] fix5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8fb1e7ddc..9fc5e3dfe 100644 --- a/Makefile +++ b/Makefile @@ -782,7 +782,7 @@ $(OPM): chmod +x $(OPM) OPERATOR_SDK = $(shell pwd)/bin/operator-sdk -OPERATOR_SDK_VERSION = 1.28.0 +OPERATOR_SDK_VERSION = 1.38.0 operator-sdk: $(OPERATOR_SDK) ## Download operator-sdk locally if necessary $(OPERATOR_SDK): curl --silent --show-error --retry 10 --retry-max-time 1800 --location --fail "https://github.com/operator-framework/operator-sdk/releases/download/v$(OPERATOR_SDK_VERSION)/operator-sdk_$(GOOS)_$(GOARCH)" --output $(OPERATOR_SDK) From e0e4315f9ae9faad94addf47d3f0251334ab2749 Mon Sep 17 00:00:00 2001 From: cchen-vertica Date: Fri, 24 Jan 2025 11:51:08 -0500 Subject: [PATCH 07/15] added changie --- changes/unreleased/Changed-20250124-115055.yaml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 changes/unreleased/Changed-20250124-115055.yaml diff --git a/changes/unreleased/Changed-20250124-115055.yaml b/changes/unreleased/Changed-20250124-115055.yaml new file mode 100644 index 000000000..b7d727344 --- /dev/null +++ b/changes/unreleased/Changed-20250124-115055.yaml @@ -0,0 +1,5 @@ +kind: Changed +body: Moved to operator-sdk v1.38.0 +time: 2025-01-24T11:50:55.710375605-05:00 +custom: + Issue: "1040" From f8ac1deec2bead080540d72cec2ec3fbbb035140 Mon Sep 17 00:00:00 2001 From: cchen-vertica Date: Fri, 24 Jan 2025 17:10:02 -0500 Subject: [PATCH 08/15] fix6 --- scripts/setup-kustomize.sh | 82 +++++++++++++++++++ scripts/wait-for-verticadb-steady-state.sh | 2 +- .../15-assert.yaml | 8 +- .../20-assert.yaml | 20 ++--- .../30-initiate-upgrade.yaml | 2 +- .../31-assert.yaml | 20 ++--- .../32-assert.yaml | 2 +- .../35-assert.yaml | 18 ++-- .../36-verify-non-replicatable-queries.yaml | 4 +- 
.../37-create-data-in-main-cluster.yaml | 2 +- .../39-verify-data-in-sandbox.yaml | 2 +- .../40-assert.yaml | 20 ++--- .../40-errors.yaml | 22 ++--- .../45-verify-new-main-cluster.yaml | 2 +- .../51-verify-new-connection-sessions.yaml | 4 +- .../55-assert.yaml | 2 +- .../55-kill-one-pod.yaml | 2 +- .../56-assert.yaml | 2 +- .../61-assert.yaml | 2 +- .../62-verify-traffic-routing.yaml | 8 +- .../setup-vdb/base/setup-vdb.yaml | 2 +- .../base/start-connection-sessions.yaml | 6 +- .../verify-new-main-cluster-connection.yaml | 2 +- 23 files changed, 159 insertions(+), 77 deletions(-) diff --git a/scripts/setup-kustomize.sh b/scripts/setup-kustomize.sh index e5f0d6d20..dbeebe217 100755 --- a/scripts/setup-kustomize.sh +++ b/scripts/setup-kustomize.sh @@ -285,6 +285,12 @@ replacements: targets: - select: kind: VerticaDB + name: v-client-proxy + fieldPaths: + - spec.proxy.image + - select: + kind: VerticaDB + name: v-client-proxy-upgrade fieldPaths: - spec.proxy.image - source: @@ -294,6 +300,82 @@ replacements: targets: - select: kind: VerticaDB + name: vertica-sample + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-k-safety-0-scaling + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-scale-up-and-down + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-kill + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-auto-restart-vertica + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-client-proxy-upgrade + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-mc-restart + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-webhook + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-managed + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-pvc-expansion + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-create-multi-sc + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-revive-multi-sc + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-verify-server-logrotate + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-pending-pod + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-restart-with-sidecars + fieldPaths: + - spec.sidecars.[name=vlogger].image + - select: + kind: VerticaDB + name: v-base-upgrade fieldPaths: - spec.sidecars.[name=vlogger].image EOF diff --git a/scripts/wait-for-verticadb-steady-state.sh b/scripts/wait-for-verticadb-steady-state.sh index 0dc262636..e72cd2f5c 100755 --- a/scripts/wait-for-verticadb-steady-state.sh +++ b/scripts/wait-for-verticadb-steady-state.sh @@ -59,7 +59,7 @@ then # All entries will have a key/value like this: # "verticadb": "kuttl-test-sterling-coyote/v-auto-restart", # We are going to look for the namespace portion. - VDB_FILTER="${@:$OPTIND:1}/" + VDB_FILTER="${@:$OPTIND:1}" else # No verticadb namespace, so include everything in the vdb filter VDB_FILTER="." 
diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/15-assert.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/15-assert.yaml index 34f31b0f5..c3f8dcb26 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/15-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/15-assert.yaml @@ -14,25 +14,25 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1 + name: v-client-proxy-upgrade-pri1 status: currentReplicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2 + name: v-client-proxy-upgrade-pri-2 status: currentReplicas: 1 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1 + name: v-client-proxy-upgrade-sec1 status: currentReplicas: 3 --- apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-client-proxy-upgrade diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/20-assert.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/20-assert.yaml index efbe1366c..e3b9ba503 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/20-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/20-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1 + name: v-client-proxy-upgrade-pri1 status: currentReplicas: 2 readyReplicas: 2 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2 + name: v-client-proxy-upgrade-pri-2 status: currentReplicas: 1 readyReplicas: 1 @@ -30,7 +30,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1 + name: v-client-proxy-upgrade-sec1 status: currentReplicas: 3 readyReplicas: 3 @@ -38,7 +38,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-client-proxy-upgrade status: subclusters: - addedToDBCount: 3 @@ -51,7 +51,7 @@ status: apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri1-proxy + name: v-client-proxy-upgrade-pri1-proxy status: replicas: 1 readyReplicas: 1 @@ -59,12 +59,12 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-pri1-proxy-cm + name: v-client-proxy-upgrade-pri1-proxy-cm --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri-2-proxy + name: v-client-proxy-upgrade-pri-2-proxy status: replicas: 1 readyReplicas: 1 @@ -72,13 +72,13 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-pri-2-proxy-cm + name: v-client-proxy-upgrade-pri-2-proxy-cm immutable: false --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-sec1-proxy + name: v-client-proxy-upgrade-sec1-proxy status: replicas: 1 readyReplicas: 1 @@ -86,4 +86,4 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-sec1-proxy-cm + name: v-client-proxy-upgrade-sec1-proxy-cm diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/30-initiate-upgrade.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/30-initiate-upgrade.yaml index 2b0413793..caf636ddf 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/30-initiate-upgrade.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/30-initiate-upgrade.yaml @@ -14,4 +14,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-base-upgrade + - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-client-proxy-upgrade diff --git 
a/tests/e2e-leg-11/online-upgrade-session-transfer/31-assert.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/31-assert.yaml index 84c94b14c..d8cec5419 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/31-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/31-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1-sb + name: v-client-proxy-upgrade-pri1-sb status: currentReplicas: 2 readyReplicas: 2 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2-sb + name: v-client-proxy-upgrade-pri-2-sb status: currentReplicas: 1 readyReplicas: 1 @@ -30,7 +30,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-client-proxy-upgrade-sec1-sb status: currentReplicas: 3 readyReplicas: 3 @@ -38,7 +38,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-client-proxy-upgrade status: subclusters: - addedToDBCount: 3 @@ -57,7 +57,7 @@ status: apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri1-sb-proxy + name: v-client-proxy-upgrade-pri1-sb-proxy status: replicas: 1 readyReplicas: 1 @@ -65,12 +65,12 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-pri1-sb-proxy-cm + name: v-client-proxy-upgrade-pri1-sb-proxy-cm --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri-2-sb-proxy + name: v-client-proxy-upgrade-pri-2-sb-proxy status: replicas: 1 readyReplicas: 1 @@ -78,13 +78,13 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-pri-2-sb-proxy-cm + name: v-client-proxy-upgrade-pri-2-sb-proxy-cm immutable: false --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-sec1-sb-proxy + name: v-client-proxy-upgrade-sec1-sb-proxy status: replicas: 1 readyReplicas: 1 @@ -92,4 +92,4 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-sec1-sb-proxy-cm + name: v-client-proxy-upgrade-sec1-sb-proxy-cm diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/32-assert.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/32-assert.yaml index 5ce597a8a..f8d614e24 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/32-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/32-assert.yaml @@ -19,4 +19,4 @@ source: involvedObject: apiVersion: vertica.com/v1 kind: VerticaDB - name: v-base-upgrade \ No newline at end of file + name: v-client-proxy-upgrade \ No newline at end of file diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/35-assert.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/35-assert.yaml index 563ea34b4..21ea5192e 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/35-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/35-assert.yaml @@ -14,9 +14,9 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1-sb + name: v-client-proxy-upgrade-pri1-sb annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-pri1-sb" + vertica.com/statefulset-name-override: "v-client-proxy-upgrade-pri1-sb" labels: vertica.com/sandbox: replica-group-b status: @@ -26,9 +26,9 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2-sb + name: v-client-proxy-upgrade-pri-2-sb annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-pri-2-sb" + vertica.com/statefulset-name-override: "v-client-proxy-upgrade-pri-2-sb" labels: 
vertica.com/sandbox: replica-group-b status: @@ -38,9 +38,9 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-client-proxy-upgrade-sec1-sb annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-sec1-sb" + vertica.com/statefulset-name-override: "v-client-proxy-upgrade-sec1-sb" labels: vertica.com/sandbox: replica-group-b status: @@ -50,16 +50,16 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-replica-group-b + name: v-client-proxy-upgrade-replica-group-b data: sandboxName: replica-group-b - verticaDBName: v-base-upgrade + verticaDBName: v-client-proxy-upgrade immutable: true --- apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-client-proxy-upgrade spec: subclusters: - name: sec1 diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/36-verify-non-replicatable-queries.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/36-verify-non-replicatable-queries.yaml index 4f7f761ca..094dfd7c9 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/36-verify-non-replicatable-queries.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/36-verify-non-replicatable-queries.yaml @@ -15,12 +15,12 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - script: | - val=$(kubectl exec -n $NAMESPACE v-base-upgrade-pri1-0 -c server -- vsql -tAc "alter database default set DataSSLMaxBufSize = 32768"); \ + val=$(kubectl exec -n $NAMESPACE v-client-proxy-upgrade-pri1-0 -c server -- vsql -tAc "alter database default set DataSSLMaxBufSize = 32768"); \ if [ $val == 0 ]; then \ exit 1; \ fi - script: | - val=$(kubectl exec -n $NAMESPACE v-base-upgrade-pri1-sb-0 -c server -- vsql -tAc "alter database default set DataSSLMaxBufSize = 32768"); \ + val=$(kubectl exec -n $NAMESPACE v-client-proxy-upgrade-pri1-sb-0 -c server -- vsql -tAc "alter database default set DataSSLMaxBufSize = 32768"); \ if [ $val == 1 ]; then \ exit 1; \ fi \ No newline at end of file diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/37-create-data-in-main-cluster.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/37-create-data-in-main-cluster.yaml index 86abf1b0b..4ee16023e 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/37-create-data-in-main-cluster.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/37-create-data-in-main-cluster.yaml @@ -21,7 +21,7 @@ data: set -o errexit set -o xtrace - POD_NAME=v-base-upgrade-pri1-0 + POD_NAME=v-client-proxy-upgrade-pri1-0 kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U dbadmin -w superuser -tAc \"CREATE TABLE public.test_table (val INTEGER);\"" kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U dbadmin -w superuser -tAc \"INSERT INTO public.test_table VALUES (99); COMMIT;\"" diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/39-verify-data-in-sandbox.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/39-verify-data-in-sandbox.yaml index e4d7c5567..af83fc95b 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/39-verify-data-in-sandbox.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/39-verify-data-in-sandbox.yaml @@ -21,7 +21,7 @@ data: set -o errexit set -o xtrace - POD_NAME=v-base-upgrade-pri1-sb-0 + POD_NAME=v-client-proxy-upgrade-pri1-sb-0 result=$(kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U dbadmin -tAc \"SELECT * FROM public.test_table ORDER BY val;\"") echo "$result" | grep -Pzo "^99\n$" > /dev/null diff --git 
a/tests/e2e-leg-11/online-upgrade-session-transfer/40-assert.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/40-assert.yaml index ce4b346ae..99f5b6386 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/40-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/40-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1-sb + name: v-client-proxy-upgrade-pri1-sb status: currentReplicas: 2 readyReplicas: 2 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2-sb + name: v-client-proxy-upgrade-pri-2-sb status: currentReplicas: 1 readyReplicas: 1 @@ -30,7 +30,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-client-proxy-upgrade-sec1-sb status: currentReplicas: 3 readyReplicas: 3 @@ -38,7 +38,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-client-proxy-upgrade spec: subclusters: - name: pri1 @@ -54,7 +54,7 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri1-sb-proxy + name: v-client-proxy-upgrade-pri1-sb-proxy status: replicas: 1 readyReplicas: 1 @@ -62,12 +62,12 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-pri1-sb-proxy-cm + name: v-client-proxy-upgrade-pri1-sb-proxy-cm --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri-2-sb-proxy + name: v-client-proxy-upgrade-pri-2-sb-proxy status: replicas: 1 readyReplicas: 1 @@ -75,13 +75,13 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-pri-2-sb-proxy-cm + name: v-client-proxy-upgrade-pri-2-sb-proxy-cm immutable: false --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-sec1-sb-proxy + name: v-client-proxy-upgrade-sec1-sb-proxy status: replicas: 1 readyReplicas: 1 @@ -89,4 +89,4 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-sec1-sb-proxy-cm + name: v-client-proxy-upgrade-sec1-sb-proxy-cm diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/40-errors.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/40-errors.yaml index ba6d547e6..cef04677d 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/40-errors.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/40-errors.yaml @@ -14,31 +14,31 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1 + name: v-client-proxy-upgrade-pri1 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2 + name: v-client-proxy-upgrade-pri-2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1 + name: v-client-proxy-upgrade-sec1 --- apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-replica-group-b + name: v-client-proxy-upgrade-replica-group-b data: sandboxName: replica-group-b - verticaDBName: v-base-upgrade + verticaDBName: v-client-proxy-upgrade immutable: true --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri1-proxy + name: v-client-proxy-upgrade-pri1-proxy status: replicas: 1 readyReplicas: 1 @@ -46,12 +46,12 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-pri1-proxy-cm + name: v-client-proxy-upgrade-pri1-proxy-cm --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri-2-proxy + name: v-client-proxy-upgrade-pri-2-proxy status: replicas: 1 readyReplicas: 1 @@ -59,13 +59,13 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: 
v-base-upgrade-pri-2-proxy-cm + name: v-client-proxy-upgrade-pri-2-proxy-cm immutable: false --- apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-sec1-proxy + name: v-client-proxy-upgrade-sec1-proxy status: replicas: 1 readyReplicas: 1 @@ -73,4 +73,4 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-sec1-proxy-cm + name: v-client-proxy-upgrade-sec1-proxy-cm diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/45-verify-new-main-cluster.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/45-verify-new-main-cluster.yaml index bac5dc29d..2cbca2c08 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/45-verify-new-main-cluster.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/45-verify-new-main-cluster.yaml @@ -21,7 +21,7 @@ data: set -o errexit set -o xtrace - POD_NAMES=("v-base-upgrade-pri1-sb-0" "v-base-upgrade-pri-2-sb-0" "v-base-upgrade-sec1-sb-0") + POD_NAMES=("v-client-proxy-upgrade-pri1-sb-0" "v-client-proxy-upgrade-pri-2-sb-0" "v-client-proxy-upgrade-sec1-sb-0") for POD_NAME in "${POD_NAMES[@]}"; do result=$(kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U dbadmin -tAc \"SELECT COUNT(*) FROM nodes WHERE node_state = 'UP' and subcluster_name not like '%sb' and sandbox = '';\"") diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/51-verify-new-connection-sessions.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/51-verify-new-connection-sessions.yaml index 821e4b7d8..d69774e07 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/51-verify-new-connection-sessions.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/51-verify-new-connection-sessions.yaml @@ -22,9 +22,9 @@ data: set -o xtrace # get ips of proxy pods - PROXY_POD_IP=$(kubectl get pod -l pod-template-hash,vertica.com/deployment-selector-name=v-base-upgrade-sec1-sb-proxy -o jsonpath='{.items[0].status.podIP}') + PROXY_POD_IP=$(kubectl get pod -l pod-template-hash,vertica.com/deployment-selector-name=v-client-proxy-upgrade-sec1-sb-proxy -o jsonpath='{.items[0].status.podIP}') - POD_NAME=v-base-upgrade-sec1-sb-0 + POD_NAME=v-client-proxy-upgrade-sec1-sb-0 # verify the connection started at step 37 still continues LAST_CONN_1=$(kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U user1 -w 'user1s3cr3t' -tAc \"select ts from user1_schema.test order by ts desc limit 1;\"") diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/55-assert.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/55-assert.yaml index 71dd5a0c2..e143c7820 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/55-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/55-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-client-proxy-upgrade-sec1-sb status: currentReplicas: 3 readyReplicas: 2 diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/55-kill-one-pod.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/55-kill-one-pod.yaml index 9a93fc9be..2bd3f00cf 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/55-kill-one-pod.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/55-kill-one-pod.yaml @@ -14,5 +14,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: kubectl delete pod v-base-upgrade-sec1-sb-0 + - command: kubectl delete pod v-client-proxy-upgrade-sec1-sb-0 namespaced: true diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/56-assert.yaml 
b/tests/e2e-leg-11/online-upgrade-session-transfer/56-assert.yaml index cf86ae9b8..f0b2b569e 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/56-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/56-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-client-proxy-upgrade-sec1-sb status: currentReplicas: 3 readyReplicas: 3 diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/61-assert.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/61-assert.yaml index 8fc369f16..2cb9376b2 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/61-assert.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/61-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: v-base-upgrade-pri1-sb-proxy + name: v-client-proxy-upgrade-pri1-sb-proxy status: replicas: 1 readyReplicas: 1 diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/62-verify-traffic-routing.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/62-verify-traffic-routing.yaml index b1c6683b4..69656397c 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/62-verify-traffic-routing.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/62-verify-traffic-routing.yaml @@ -22,12 +22,12 @@ data: set -o xtrace # get ips of proxy pods - PROXY_POD_IP=$(kubectl get pod -l pod-template-hash,vertica.com/deployment-selector-name=v-base-upgrade-pri1-sb-proxy -o jsonpath='{.items[0].status.podIP}') + PROXY_POD_IP=$(kubectl get pod -l pod-template-hash,vertica.com/deployment-selector-name=v-client-proxy-upgrade-pri1-sb-proxy -o jsonpath='{.items[0].status.podIP}') # use vsql to connect to service "pri1" inside a vertica pod and get the client ip - NAMESPACE=$(kubectl get pod v-base-upgrade-pri1-sb-0 -o=jsonpath='{.metadata.namespace}') - SERVICE_DNS=v-base-upgrade-pri1.${NAMESPACE}.svc.cluster.local - CLIENT_IP=$(kubectl exec v-base-upgrade-pri1-sb-0 -i -c server -- bash -c "vsql -h $SERVICE_DNS -U dbadmin -tAc 'SELECT client_hostname FROM current_session;'" | cut -d':' -f1) + NAMESPACE=$(kubectl get pod v-client-proxy-upgrade-pri1-sb-0 -o=jsonpath='{.metadata.namespace}') + SERVICE_DNS=v-client-proxy-upgrade-pri1.${NAMESPACE}.svc.cluster.local + CLIENT_IP=$(kubectl exec v-client-proxy-upgrade-pri1-sb-0 -i -c server -- bash -c "vsql -h $SERVICE_DNS -U dbadmin -tAc 'SELECT client_hostname FROM current_session;'" | cut -d':' -f1) # verify the connection is made by proxy pods if [[ "$CLIENT_IP" != "$PROXY_POD_IP" ]]; then diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/setup-vdb/base/setup-vdb.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/setup-vdb/base/setup-vdb.yaml index 730730634..6bf3430b6 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/setup-vdb/base/setup-vdb.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/setup-vdb/base/setup-vdb.yaml @@ -14,7 +14,7 @@ apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-client-proxy-upgrade annotations: vertica.com/include-uid-in-path: true vertica.com/online-upgrade-preferred-sandbox: "replica-group-b" diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/start-connection-sessions/base/start-connection-sessions.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/start-connection-sessions/base/start-connection-sessions.yaml index c818b4e36..66f5f1c9e 100644 --- 
a/tests/e2e-leg-11/online-upgrade-session-transfer/start-connection-sessions/base/start-connection-sessions.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/start-connection-sessions/base/start-connection-sessions.yaml @@ -22,11 +22,11 @@ data: set -o xtrace # Access to subclusters through seconday service should route to secondary subcluster client proxy pod - vsql -U dbadmin -h v-base-upgrade-sec1 -tAc "CREATE USER user1 IDENTIFIED BY 'user1s3cr3t'; CREATE SCHEMA user1_schema; GRANT ALL ON SCHEMA user1_schema TO user1;" - vsql -U user1 -w 'user1s3cr3t' -h v-base-upgrade-sec1 -tAc 'create table user1_schema.test (ts timestamp, sc_name varchar, node_name varchar, version varchar);' + vsql -U dbadmin -h v-client-proxy-upgrade-sec1 -tAc "CREATE USER user1 IDENTIFIED BY 'user1s3cr3t'; CREATE SCHEMA user1_schema; GRANT ALL ON SCHEMA user1_schema TO user1;" + vsql -U user1 -w 'user1s3cr3t' -h v-client-proxy-upgrade-sec1 -tAc 'create table user1_schema.test (ts timestamp, sc_name varchar, node_name varchar, version varchar);' # Start new connection every 2 seconds - while [ 1 ]; do date; vsql -U user1 -w 'user1s3cr3t' -h v-base-upgrade-sec1 -c "insert into user1_schema.test values ( sysdate(), current_subcluster_name(), local_node_name(), version()); commit;"; sleep 2; done + while [ 1 ]; do date; vsql -U user1 -w 'user1s3cr3t' -h v-client-proxy-upgrade-sec1 -c "insert into user1_schema.test values ( sysdate(), current_subcluster_name(), local_node_name(), version()); commit;"; sleep 2; done --- apiVersion: batch/v1 kind: Job diff --git a/tests/e2e-leg-11/online-upgrade-session-transfer/verify-new-main-cluster-connection/base/verify-new-main-cluster-connection.yaml b/tests/e2e-leg-11/online-upgrade-session-transfer/verify-new-main-cluster-connection/base/verify-new-main-cluster-connection.yaml index a889ae285..c9cfa7359 100644 --- a/tests/e2e-leg-11/online-upgrade-session-transfer/verify-new-main-cluster-connection/base/verify-new-main-cluster-connection.yaml +++ b/tests/e2e-leg-11/online-upgrade-session-transfer/verify-new-main-cluster-connection/base/verify-new-main-cluster-connection.yaml @@ -21,7 +21,7 @@ data: set -o errexit set -o xtrace # Access to subclusters through secondary service should route to the new main cluster. 
- CONNECTION_NODE=$(vsql -U dbadmin -h v-base-upgrade-sec1 -tAc "select node_name from current_session") + CONNECTION_NODE=$(vsql -U dbadmin -h v-client-proxy-upgrade-sec1 -tAc "select node_name from current_session") echo $CONNECTION_NODE if [[ $CONNECTION_NODE == "v_repup_node0010" ]] || \ [[ $CONNECTION_NODE == "v_repup_node0011" ]] || \ From 40d0c0b3b160df9ff6e4727d0d4270b895890397 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Sun, 26 Jan 2025 02:42:34 +0000 Subject: [PATCH 09/15] fixed olm deploy --- config/default/manager_metrics_patch.yaml | 8 +- ...ticadb-operator.clusterserviceversion.yaml | 2363 ++++++++++------- pkg/opcfg/config.go | 10 +- scripts/gen-csv.sh | 4 + scripts/setup-olm.sh | 2 +- scripts/template-helm-chart.sh | 2 +- 6 files changed, 1465 insertions(+), 924 deletions(-) diff --git a/config/default/manager_metrics_patch.yaml b/config/default/manager_metrics_patch.yaml index 239dedeb8..77f0d50f6 100644 --- a/config/default/manager_metrics_patch.yaml +++ b/config/default/manager_metrics_patch.yaml @@ -6,4 +6,10 @@ path: /spec/template/spec/containers/0/volumeMounts/1 value: mountPath: /cert - name: auth-cert \ No newline at end of file + name: auth-cert +- op: add + path: /spec/template/spec/volumes/0 + value: + name: auth-cert + secret: + secretName: custom-cert \ No newline at end of file diff --git a/config/manifests/bases/verticadb-operator.clusterserviceversion.yaml b/config/manifests/bases/verticadb-operator.clusterserviceversion.yaml index 1ca211b76..954f58a41 100644 --- a/config/manifests/bases/verticadb-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/verticadb-operator.clusterserviceversion.yaml @@ -35,7 +35,8 @@ spec: name: "" version: batch/v1 specDescriptors: - - description: List of things that must be matched in order for the Job to be + - description: |- + List of things that must be matched in order for the Job to be created. Multiple matches are combined with AND logic. displayName: Matches path: matches @@ -51,8 +52,9 @@ spec: - description: Objects that this event trigger will apply too. displayName: References path: references - - description: A single object, given by GVK + namespace + name, that this event - trigger will apply too. + - description: |- + A single object, given by GVK + namespace + name, that this event trigger + will apply too. displayName: Object path: references[0].object - description: The API version of the reference object @@ -61,32 +63,38 @@ spec: - description: The kind of the reference object displayName: Kind path: references[0].object.kind - - description: The name of the reference object. This doesn't have to exist - prior to creating the CR. + - description: |- + The name of the reference object. This doesn't have to exist prior to + creating the CR. displayName: Name path: references[0].object.name - description: The namespace that the reference object exists in. displayName: Namespace path: references[0].object.namespace - - description: A template of a Job that will get created when the conditions - are met for any reference object. + - description: |- + A template of a Job that will get created when the conditions are met for + any reference object. displayName: Template path: template - - description: The job's object meta data. At a minimum, the name or generateName - must be set. + - description: |- + The job's object meta data. At a minimum, the name or generateName must + be set. 
displayName: Metadata path: template.metadata - - description: Annotations is an unstructured key value map stored with a resource - that may be set by external tools to store and retrieve arbitrary metadata. - They are not queryable and should be preserved when modifying objects. + - description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. displayName: Annotations path: template.metadata.annotations - - description: GenerateName is an optional prefix, used by the server, to generate - a unique name ONLY IF the Name field has not been provided. + - description: |- + GenerateName is an optional prefix, used by the server, to generate a unique + name ONLY IF the Name field has not been provided. displayName: Generate Name path: template.metadata.generateName - - description: Map of string keys and values that can be used to organize and - categorize (scope and select) objects. + - description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. displayName: Labels path: template.metadata.labels - description: Name must be unique within a namespace. Can be omitted if GenerateName @@ -103,13 +111,15 @@ spec: - description: The API version of the reference object displayName: APIVersion path: references[0].apiVersion - - description: If a job was created because a match was found for this reference - object, this is the name of the last job that was created. This pairs with - the jobNamespace parameter to uniquely identify the job. + - description: |- + If a job was created because a match was found for this reference object, + this is the name of the last job that was created. This pairs with the + jobNamespace parameter to uniquely identify the job. displayName: Job Name path: references[0].jobName - - description: If a job was created because a match was found for this reference - object, this is the namespace the job is found in. This pairs with the jobName + - description: |- + If a job was created because a match was found for this reference object, + this is the namespace the job is found in. This pairs with the jobName parameter to uniquely identify the job. displayName: Job Namespace path: references[0].jobNamespace @@ -120,8 +130,9 @@ spec: - description: The kind of the reference object displayName: Kind path: references[0].kind - - description: The name of the reference object. This doesn't have to exist - prior to creating the CR. + - description: |- + The name of the reference object. This doesn't have to exist prior to + creating the CR. displayName: Name path: references[0].name - description: The namespace that the reference object exists in. @@ -134,7 +145,8 @@ spec: displayName: UID path: references[0].uid version: v1beta1 - - description: VerticaAutoscaler is a CR that allows you to autoscale one or more + - description: |- + VerticaAutoscaler is a CR that allows you to autoscale one or more subclusters in a VerticaDB. displayName: Vertica Autoscaler kind: VerticaAutoscaler @@ -144,46 +156,83 @@ spec: name: "" version: vertica.com/v1beta1 specDescriptors: - - description: 'This defines how the scaling will happen. This can be one of - the following: - Subcluster: Scaling will be achieved by creating or deleting - entire subclusters. 
The template for new subclusters are either the template - if filled out or an existing subcluster that matches the service name. - - Pod: Only increase or decrease the size of an existing subcluster. If multiple - subclusters are selected by the serviceName, this will grow the last subcluster - only.' + - description: |- + This struct allows customization of autoscaling. Custom metrics can be used instead of the memory and cpu metrics. + The scaling behavior can also be customized to meet different performance requirements. The maximum and minimum + sizes of the replica sets can be specified to limit the use of resources. + displayName: Custom Autoscaler + path: customAutoscaler + - description: Specifies the scaling behavior for both scale up and down. + displayName: Behavior + path: customAutoscaler.behavior + - description: The maximum number of pods when scaling. + displayName: Max Replicas + path: customAutoscaler.maxReplicas + - description: The custom metric definition to be used for autoscaling. + displayName: Metrics + path: customAutoscaler.metrics + - description: The custom metric to be used for autoscaling. + displayName: Metric + path: customAutoscaler.metrics[0].metric + - description: |- + The threshold to use for scaling down. It must be of the same type as + the one used for scaling up, defined in the metric field. + displayName: Scale Down Threshold + path: customAutoscaler.metrics[0].scaleDownThreshold + - description: The value used to increase the threshold after a scale up or + a scale down. + displayName: Threshold Adjustment Value + path: customAutoscaler.metrics[0].thresholdAdjustmentValue + - description: The minimum number of pods when scaling. + displayName: Min Replicas + path: customAutoscaler.minReplicas + - description: |- + This defines how the scaling will happen. This can be one of the following: + - Subcluster: Scaling will be achieved by creating or deleting entire subclusters. + The template for new subclusters is either the template if filled out + or an existing subcluster that matches the service name. + - Pod: Only increase or decrease the size of an existing subcluster. + If multiple subclusters are selected by the serviceName, this will grow + the last subcluster only. displayName: Scaling Granularity path: scalingGranularity x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:Pod - urn:alm:descriptor:com.tectonic.ui:select:Subcluster - - description: This acts as a selector for the subclusters that are being scaled - together. Each subcluster has a service name field, which if omitted is - the same name as the subcluster name. Multiple subclusters that have the - same service name use the same service object. + - description: |- + This acts as a selector for the subclusters that are being scaled together. + Each subcluster has a service name field, which if omitted is the same + name as the subcluster name. Multiple subclusters that have the same + service name use the same service object. displayName: Service Name path: serviceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: This is the total pod count for all subclusters that match the - serviceName. Changing this value may trigger a change in the VerticaDB - that is associated with this object. This value is generally left as zero. It - will get initialized in the operator and then modified via the /scale subresource. + - description: |- + This is the total pod count for all subclusters that match the + serviceName.
Changing this value may trigger a change in the + VerticaDB that is associated with this object. This value is generally + left as zero. It will get initialized in the operator and then modified + via the /scale subresource. displayName: Target Size path: targetSize x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podCount - - description: When the scaling granularity is Subcluster, this field defines - a template to use for when a new subcluster needs to be created. If size - is 0, then the operator will use an existing subcluster to use as the template. If + - description: |- + When the scaling granularity is Subcluster, this field defines a template + to use for when a new subcluster needs to be created. If size is 0, then + the operator will use an existing subcluster to use as the template. If size is > 0, the service name must match the serviceName parameter. The name of the new subcluster is always auto generated. If the name is set here it will be used as a prefix for the new subcluster. Otherwise, we - use the name of this VerticaAutoscaler object as a prefix for all subclusters. + use the name of this VerticaAutoscaler object as a prefix for all + subclusters. displayName: Template path: template - - description: 'Like nodeSelector this allows you to constrain the pod only - to certain pods. It is more expressive than just using node selectors. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity' + - description: |- + Like nodeSelector this allows you to constrain the pod only to certain + pods. It is more expressive than just using node selectors. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity displayName: Affinity path: template.affinity - description: Describes node affinity scheduling rules for the pod. @@ -203,79 +252,114 @@ spec: path: template.affinity.podAntiAffinity x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity - - description: A map of key/value pairs appended to the stateful metadata.annotations - of the subcluster. + - description: |- + A map of key/value pairs appended to the stateful metadata.annotations of + the subcluster. displayName: Annotations path: template.annotations - - description: 'Allows the service object to be attached to a list of external - IPs that you specify. If not set, the external IP list is left empty in - the service object. More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips' + - description: |- + Allows the service object to be attached to a list of external IPs that you + specify. If not set, the external IP list is left empty in the service object. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips displayName: External IPs path: template.externalIPs x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: This allows a different image to be used for the subcluster than - the one in VerticaDB. This is intended to be used internally by the online - image change process. + - description: |- + This allows a different image to be used for the subcluster than the one + in VerticaDB. This is intended to be used internally by the online image + change process. displayName: Image Override path: template.imageOverride x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Indicates whether the subcluster is a primary or secondary. 
You - must have at least one primary subcluster in the database. + - description: |- + Indicates whether the subcluster is a primary or secondary. You must have + at least one primary subcluster in the database. displayName: Is Primary path: template.isPrimary x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - - description: A sandbox primary subcluster is a secondary subcluster that was - the first subcluster in a sandbox. These subclusters are primaries when - they are sandboxed. When unsandboxed, they will go back to being just a - secondary subcluster + - description: |- + A sandbox primary subcluster is a secondary subcluster that was the first + subcluster in a sandbox. These subclusters are primaries when they are + sandboxed. When unsandboxed, they will go back to being just a secondary + subcluster displayName: Is Sandbox Primary path: template.isSandboxPrimary x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Internal state that indicates whether this is a transient read-only - subcluster used for online upgrade. A subcluster that exists temporarily - to serve traffic for subclusters that are restarting with the new image. + - description: |- + Internal state that indicates whether this is a transient read-only + subcluster used for online upgrade. A subcluster that exists + temporarily to serve traffic for subclusters that are restarting with the + new image. displayName: Is Transient path: template.isTransient x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: 'Specify IP address of LoadBalancer service for this subcluster. - This field is ignored when serviceType != "LoadBalancer". More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer' + - description: |- + Specify IP address of LoadBalancer service for this subcluster. + This field is ignored when serviceType != "LoadBalancer". + More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer displayName: Load Balancer IP path: template.loadBalancerIP x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the subcluster. This is a required parameter. This - cannot change after CRD creation. + - description: |- + The name of the subcluster. This is a required parameter. This cannot + change after CRD creation. displayName: Name path: template.name - - description: When setting serviceType to NodePort, this parameter allows you - to define the port that is opened at each node for Vertica client connections. - If using NodePort and this is omitted, Kubernetes will choose the port automatically. - This port must be from within the defined range allocated by the control - plane (default is 30000-32767). + - description: |- + When setting serviceType to NodePort, this parameter allows you to define the + port that is opened at each node for Vertica client connections. If using + NodePort and this is omitted, Kubernetes will choose the port + automatically. This port must be from within the defined range allocated + by the control plane (default is 30000-32767). displayName: Node Port path: template.nodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'A map of label keys and values to restrict Vertica node scheduling - to workers with matching labels. 
More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector' + - description: |- + A map of label keys and values to restrict Vertica node scheduling to workers + with matching labels. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector displayName: Node Selector path: template.nodeSelector - - description: 'The priority class name given to pods in this subcluster. This - affects where the pod gets scheduled. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass' + - description: |- + The priority class name given to pods in this subcluster. This affects + where the pod gets scheduled. + More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass displayName: Priority Class Name path: template.priorityClassName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'This defines the resource requests and limits for pods in the - subcluster. It is advisable that the request and limits match as this ensures - the pods are assigned to the guaranteed QoS class. This will reduces the - chance that pods are chosen by the OOM killer. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + - description: |- + Create client proxy pods for the subcluster if defined + All incoming connections to the subclusters will be routed through the proxy pods + displayName: Proxy + path: template.proxy + - description: The number of replicas that the proxy server will have. + displayName: Replicas + path: template.proxy.replicas + - description: |- + This defines the resource requests and limits for the client proxy pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + displayName: Resources + path: template.proxy.resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: |- + This defines the resource requests and limits for pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ displayName: Resources path: template.resources x-descriptors: @@ -283,61 +367,75 @@ spec: - description: A map of key/value pairs appended to service metadata.annotations. displayName: Service Annotations path: template.serviceAnnotations - - description: Identifies the name of the service object that will serve this - subcluster. If multiple subclusters share the same service name then they - all share the same service object. This allows for a single service object - to round robin between multiple subclusters. If this is left blank, a service - object matching the subcluster name is used. The actual name of the service - object is always prefixed with the name of the owning VerticaDB. + - description: |- + Identifies the name of the service object that will serve this + subcluster. If multiple subclusters share the same service name then + they all share the same service object. 
This allows for a single service + object to round robin between multiple subclusters. If this is left + blank, a service object matching the subcluster name is used. The actual + name of the service object is always prefixed with the name of the owning + VerticaDB. displayName: Service Name path: template.serviceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Identifies the type of Kubernetes service to use for external - client connectivity. The default is to use a ClusterIP, which sets a stable - IP and port to use that is accessible only from within Kubernetes itself. - Depending on the service type chosen the user may need to set other config - knobs to further config it. These other knobs follow this one. More info: - https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + - description: |- + Identifies the type of Kubernetes service to use for external client + connectivity. The default is to use a ClusterIP, which sets a stable IP + and port to use that is accessible only from within Kubernetes itself. + Depending on the service type chosen the user may need to set other + config knobs to further config it. These other knobs follow this one. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types displayName: Service Type path: template.serviceType x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:ClusterIP - urn:alm:descriptor:com.tectonic.ui:select:NodePort - urn:alm:descriptor:com.tectonic.ui:select:LoadBalancer - - description: State to indicate whether the operator must shut down the subcluster + - description: |- + State to indicate whether the operator must shut down the subcluster and not try to restart it. displayName: Shutdown path: template.shutdown - - description: "The number of pods that the subcluster will have. This determines - the number of Vertica nodes that it will have. Changing this number will - either delete or schedule new pods. \n The database has a k-safety of 1. - So, if this is a primary subcluster, the minimum value is 3. If this is - a secondary subcluster, the minimum is 0. \n Note, you must have a valid - license to pick a value larger than 3. The default license that comes in - the vertica container is for the community edition, which can only have - 3 nodes. The license can be set with the db.licenseSecret parameter." + - description: |- + The number of pods that the subcluster will have. This determines the + number of Vertica nodes that it will have. Changing this number will + either delete or schedule new pods. + + + The database has a k-safety of 1. So, if this is a primary subcluster, + the minimum value is 3. If this is a secondary subcluster, the minimum is + 0. + + + Note, you must have a valid license to pick a value larger than 3. The + default license that comes in the vertica container is for the community + edition, which can only have 3 nodes. The license can be set with the + db.licenseSecret parameter. displayName: Size path: template.size x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podCount - - description: 'Any tolerations and taints to use to aid in where to schedule - a pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/' + - description: |- + Any tolerations and taints to use to aid in where to schedule a pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ displayName: Tolerations path: template.tolerations x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Like the nodePort parameter, except this controls the node port - to use for the http endpoint in the Vertica server. The same rules apply: - it must be defined within the range allocated by the control plane, if omitted - Kubernetes will choose the port automatically.' + - description: |- + Like the nodePort parameter, except this controls the node port to use + for the http endpoint in the Vertica server. The same rules apply: it + must be defined within the range allocated by the control plane, if + omitted Kubernetes will choose the port automatically. displayName: Vertica HTTPNode Port path: template.verticaHTTPNodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the VerticaDB CR that this autoscaler is defined - for. The VerticaDB object must exist in the same namespace as this object. + - description: |- + The name of the VerticaDB CR that this autoscaler is defined for. The + VerticaDB object must exist in the same namespace as this object. displayName: Vertica DBName path: verticaDBName x-descriptors: @@ -349,7 +447,9 @@ spec: - description: Last time the condition transitioned from one status to another. displayName: Last Transition Time path: conditions[0].lastTransitionTime - - description: Status is the status of the condition can be True, False or Unknown + - description: |- + Status is the status of the condition + can be True, False or Unknown displayName: Status path: conditions[0].status - description: Type is the type of the condition @@ -383,28 +483,34 @@ spec: name: "" version: apps/v1 specDescriptors: - - description: Custom annotations that will be added to all of the objects that - the operator will create. + - description: |- + Custom annotations that will be added to all of the objects that the + operator will create. displayName: Annotations path: annotations - - description: State to indicate whether the operator will restart Vertica if - the process is not running. Under normal cicumstances this is set to true. - The purpose of this is to allow a maintenance window, such as a manual upgrade, - without the operator interfering. + - description: |- + State to indicate whether the operator will restart Vertica if the + process is not running. Under normal circumstances this is set to true. + The purpose of this is to allow a maintenance window, such as a + manual upgrade, without the operator interfering. displayName: Auto Restart Vertica path: autoRestartVertica x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Secrets that will be mounted in the vertica container. The - purpose of this is to allow custom certs to be available. The full path - is: /certs// Where is the name provided - in the secret and is one of the keys in the secret.' + - description: |- + Secrets that will be mounted in the vertica container. The purpose of + this is to allow custom certs to be available. The full path is: + /certs// + Where is the name provided in the secret and is one + of the keys in the secret. displayName: Cert Secrets path: certSecrets x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + - description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names displayName: Name path: certSecrets[0].name x-descriptors: @@ -412,72 +518,78 @@ spec: - description: Contains details about the communal storage. displayName: Communal path: communal - - description: Contains a map of server configuration parameters. To avoid duplicate - values, if a parameter is already set through another CR field, (like S3ServerSideEncryption - through communal.s3ServerSideEncryption), the corresponding key/value pair - is skipped. If a config value is set that isn't supported by the server - version you are running, the server will fail to start. These are set only - during initial bootstrap. After the database has been initialized, changing - the options in the CR will have no affect in the server. + - description: |- + Contains a map of server configuration parameters. + To avoid duplicate values, if a parameter is already set through another CR field, + (like S3ServerSideEncryption through communal.s3ServerSideEncryption), the corresponding + key/value pair is skipped. If a config value is set that isn't supported by the server version + you are running, the server will fail to start. These are set only during initial bootstrap. After + the database has been initialized, changing the options in the CR will have no affect in the server. displayName: Additional Config path: communal.additionalConfig - - description: The absolute path to a certificate bundle of trusted CAs. This - CA bundle is used when establishing TLS connections to external services - such as AWS, Azure or swebhdf:// scheme. Typically this would refer to - a path to one of the certSecrets. + - description: |- + The absolute path to a certificate bundle of trusted CAs. This CA bundle + is used when establishing TLS connections to external services such as + AWS, Azure or swebhdf:// scheme. Typically this would refer to a path to + one of the certSecrets. displayName: Ca File path: communal.caFile - - description: 'The name of an optional secret that contains the credentials - to connect to the communal endpoint. This can be omitted if the communal - storage uses some other form of authentication such as an attached IAM profile - in AWS. Certain keys need to be set, depending on the endpoint type. If - the communal storage starts with s3:// or gs://, the secret must have the - following keys set: accesskey and secretkey. If the communal storage starts - with azb://, the secret can have the following keys: accountName, blobEndpoint, - accountKey, or sharedAccessSignature. To store this secret outside of Kubernetes, - you can use a secret path reference prefix, such as gsm://. Everything after - the prefix is the name of the secret in the service you are storing.' + - description: |- + The name of an optional secret that contains the credentials to connect to the + communal endpoint. This can be omitted if the communal storage uses some + other form of authentication such as an attached IAM profile in AWS. + Certain keys need to be set, depending on the endpoint type. If the + communal storage starts with s3:// or gs://, the secret must have the + following keys set: accesskey and secretkey. If the communal storage + starts with azb://, the secret can have the following keys: accountName, + blobEndpoint, accountKey, or sharedAccessSignature. 
To store this secret + outside of Kubernetes, you can use a secret path reference prefix, such + as gsm://. Everything after the prefix is the name of the secret in the + service you are storing. displayName: Credential Secret path: communal.credentialSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: The URL to the communal endpoint. The endpoint must be prefaced - with http:// or https:// to know what protocol to connect with. If using - S3 or Google Cloud Storage as communal storage and initPolicy is Create - or Revive, this field is required and cannot change after creation. + - description: |- + The URL to the communal endpoint. The endpoint must be prefaced with http:// or + https:// to know what protocol to connect with. If using S3 or Google + Cloud Storage as communal storage and initPolicy is Create or Revive, + this field is required and cannot change after creation. displayName: Endpoint path: communal.endpoint - - description: The path to the communal storage. We support S3, Google Cloud - Storage, and HDFS paths. The protocol in the path (e.g. s3:// or webhdfs://) - dictates the type of storage. The path, whether it be a S3 bucket or HDFS - path, must exist prior to creating the VerticaDB. When initPolicy is Create, - this field is required and the path must be empty. When initPolicy is Revive, - this field is required and must be non-empty. + - description: |- + The path to the communal storage. We support S3, Google Cloud Storage, + and HDFS paths. The protocol in the path (e.g. s3:// or webhdfs://) + dictates the type of storage. The path, whether it be a S3 bucket or + HDFS path, must exist prior to creating the VerticaDB. When initPolicy + is Create, this field is required and the path must be empty. When + initPolicy is Revive, this field is required and must be non-empty. displayName: Path path: communal.path - - description: The region containing the bucket. If you do not set the correct + - description: |- + The region containing the bucket. If you do not set the correct region, you might experience a delay before the bootstrap fails because Vertica retries several times before giving up. displayName: Region path: communal.region - - description: 'The server-side encryption type Vertica will use to read/write - from encrypted S3 communal storage. Available values are: SSE-S3, SSE-KMS, - SSE-C and empty string (""). - SSE-S3: the S3 service manages encryption - keys. - SSE-KMS: encryption keys are managed by the Key Management Service - (KMS). KMS key identifier must be supplied through communal.additionalConfig - map. - SSE-C: the client manages encryption keys and provides them to S3 - for each operation. The client key must be supplied through communal.s3SseCustomerKeySecret. - - Empty string (""): No encryption. This is the default value. This value - cannot change after the initial creation of the VerticaDB.' + - description: "The server-side encryption type Vertica will use to read/write + from encrypted S3 communal storage.\nAvailable values are: SSE-S3, SSE-KMS, + SSE-C and empty string (\"\").\n- SSE-S3: the S3 service manages encryption + keys.\n- SSE-KMS: encryption keys are managed by the Key Management Service + (KMS).\n\t KMS key identifier must be supplied through communal.additionalConfig + map.\n- SSE-C: the client manages encryption keys and provides them to S3 + for each operation.\n\t The client key must be supplied through communal.s3SseCustomerKeySecret.\n- + Empty string (\"\"): No encryption. 
This is the default value.\nThis value + cannot change after the initial creation of the VerticaDB." displayName: S3 Server Side Encryption path: communal.s3ServerSideEncryption x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:SSE-S3 - urn:alm:descriptor:com.tectonic.ui:select:SSE-KMS - urn:alm:descriptor:com.tectonic.ui:select:SSE-C - - description: The name of a secret that contains the key to use for the S3SseCustomerKey - config option in the server. It is required when S3ServerSideEncryption - is SSE-C. When set, the secret must have a key named clientKey. + - description: |- + The name of a secret that contains the key to use for the S3SseCustomerKey config option in the server. + It is required when S3ServerSideEncryption is SSE-C. When set, the secret must have a key named clientKey. displayName: S3 Sse Customer Key Secret path: communal.s3SseCustomerKeySecret x-descriptors: @@ -488,18 +600,20 @@ spec: path: dbName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Controls if the spread communication between pods is encrypted. Valid - values are 'vertica', 'disabled', or an empty string. By default, the value - is set to 'vertica' unless the user explicitly set it to 'disabled'. When - set to 'vertica' or an empty string, Vertica generates the spread encryption - key for the cluster when the database starts up. This can only be set during - initial creation of the CR. If set for initPolicy other than Create, then - it has no effect. + - description: |- + Controls if the spread communication between pods is encrypted. Valid + values are 'vertica', 'disabled', or an empty string. By default, + the value is set to 'vertica' unless the user explicitly set it to + 'disabled'. When set to 'vertica' or an empty string, Vertica generates + the spread encryption key for the cluster when the database starts up. + This can only be set during initial creation of the CR. If set for + initPolicy other than Create, then it has no effect. displayName: Encrypt Spread Comm path: encryptSpreadComm x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: A config map that contains the contents of the /etc/hadoop directory. + - description: |- + A config map that contains the contents of the /etc/hadoop directory. This gets mounted in the container and is used to configure connections to an HDFS communal path displayName: Hadoop Config @@ -507,10 +621,11 @@ spec: x-descriptors: - urn:alm:descriptor:io.kubernetes:ConfigMap - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The docker image name that contains the Vertica server. Whenever - this changes, the operator treats this as an upgrade. The upgrade can be - done either in an online or offline fashion. See the upgradePolicy to understand - how to control the behavior. + - description: |- + The docker image name that contains the Vertica server. Whenever this + changes, the operator treats this as an upgrade. The upgrade can be done + either in an online or offline fashion. See the upgradePolicy to + understand how to control the behavior. displayName: Image path: image - description: This dictates the image pull policy to use @@ -518,56 +633,67 @@ spec: path: imagePullPolicy x-descriptors: - urn:alm:descriptor:com.tectonic.ui:imagePullPolicy - - description: 'ImagePullSecrets is an optional list of references to secrets - in the same namespace to use for pulling the image. If specified, these - secrets will be passed to individual puller implementations for them to - use. 
For example, in the case of docker, only DockerConfig type secrets - are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + - description: |- + ImagePullSecrets is an optional list of references to secrets in the same + namespace to use for pulling the image. If specified, these secrets will + be passed to individual puller implementations for them to use. For + example, in the case of docker, only DockerConfig type secrets are + honored. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod displayName: Image Pull Secrets path: imagePullSecrets - - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + - description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names displayName: Name path: imagePullSecrets[0].name x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: The initialization policy specifies how the database should be + - description: |- + The initialization policy specifies how the database should be configured. Options include creating a new database, reviving an existing - one, or simply scheduling pods. Possible values are Create, Revive, CreateSkipPackageInstall, - or ScheduleOnly. + one, or simply scheduling pods. Possible values are Create, Revive, + CreateSkipPackageInstall, or ScheduleOnly. displayName: Init Policy path: initPolicy x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:Create - urn:alm:descriptor:com.tectonic.ui:select:Revive - urn:alm:descriptor:com.tectonic.ui:select:ScheduleOnly - - description: 'A secret that contains files required for Kereberos setup. The - secret must have the following keys: - krb5.conf: The contents of the Kerberos - config file - krb5.keytab: The keytab file that stores credentials for each - Vertica principal. These files will be mounted in /etc. We use the same - keytab file on each host, so it must contain all of the Vertica principals.' + - description: |- + A secret that contains files required for Kereberos setup. The secret + must have the following keys: + - krb5.conf: The contents of the Kerberos config file + - krb5.keytab: The keytab file that stores credentials for each Vertica principal. + These files will be mounted in /etc. We use the same keytab file on each + host, so it must contain all of the Vertica principals. displayName: Kerberos Secret path: kerberosSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Custom labels that will be added to all of the objects that the - operator will create. + - description: |- + Custom labels that will be added to all of the objects that the operator + will create. displayName: Labels path: labels - - description: The name of a secret that contains the contents of license files. - The secret must be in the same namespace as the CRD. Each of the keys in - the secret will be mounted as files in /home/dbadmin/licensing/mnt. If this + - description: |- + The name of a secret that contains the contents of license files. The + secret must be in the same namespace as the CRD. Each of the keys in the + secret will be mounted as files in /home/dbadmin/licensing/mnt. 
If this is set prior to creating a database, it will include one of the licenses - from the secret -- if there are multiple licenses it will pick one by selecting - the first one alphabetically. The user is responsible for installing any - additional licenses or if the license was added to the secret after DB creation. + from the secret -- if there are multiple licenses it will pick one by + selecting the first one alphabetically. The user is responsible for + installing any additional licenses or if the license was added to the + secret after DB creation. displayName: License Secret path: licenseSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: Allows tuning of the Vertica pods liveness probe. Each of the - values here are applied to the default liveness probe we create. If this - is omitted, we use the default probe. + - description: |- + Allows tuning of the Vertica pods liveness probe. Each of the values + here are applied to the default liveness probe we create. If this is + omitted, we use the default probe. displayName: Liveness Probe Override path: livenessProbeOverride x-descriptors: @@ -575,81 +701,111 @@ spec: - description: Contain details about the local storage displayName: Local path: local - - description: The path in the container to the catalog. When initializing - the database with revive, this path must match the catalog path used when - the database was first created. For backwards compatibility, if this is - omitted, then it shares the same path as the dataPath. + - description: |- + The path in the container to the catalog. When initializing the database with + revive, this path must match the catalog path used when the database was + first created. For backwards compatibility, if this is omitted, then it + shares the same path as the dataPath. displayName: Catalog Path path: local.catalogPath - - description: The path in the container to the local directory for the 'DATA,TEMP' + - description: |- + The path in the container to the local directory for the 'DATA,TEMP' storage location usage. When initializing the database with revive, the local path here must match the path that was used when the database was first created. displayName: Data Path path: local.dataPath - - description: The path in the container to the depot -- 'DEPOT' storage location - usage. When initializing the database with revive, this path must match - the depot path used when the database was first created. + - description: |- + The path in the container to the depot -- 'DEPOT' storage location usage. + When initializing the database with revive, this path must match the + depot path used when the database was first created. displayName: Depot Path path: local.depotPath - - description: 'The type of volume to use for the depot. Allowable values will - be: EmptyDir and PersistentVolume or an empty string. An empty string currently - defaults to PersistentVolume.' + - description: |- + The type of volume to use for the depot. + Allowable values will be: EmptyDir and PersistentVolume or an empty string. + An empty string currently defaults to PersistentVolume. displayName: Depot Volume path: local.depotVolume x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:PersistentVolume - urn:alm:descriptor:com.tectonic.ui:select:EmptyDir - - description: The minimum size of the local data volume when picking a PV. If - changing this after the PV have been created, two things may happen. First, - it will cause a resize of the PV to the new size. 
And, if depot is stored - in the PV, a resize of the depot happens too. + - description: |- + The minimum size of the local data volume when picking a PV. If changing + this after the PV have been created, two things may happen. First, it + will cause a resize of the PV to the new size. And, if depot is + stored in the PV, a resize of the depot happens too. displayName: Request Size path: local.requestSize - - description: The local data stores the local catalog, depot and config files. - Portions of the local data are persisted with a persistent volume (PV) using - a persistent volume claim (PVC). The catalog and config files are always + - description: |- + The local data stores the local catalog, depot and config files. Portions + of the local data are persisted with a persistent volume (PV) using a + persistent volume claim (PVC). The catalog and config files are always stored in the PV. The depot may be include too if depotVolume is set to 'PersistentVolume'. This field is used to define the name of the storage - class to use for the PV. This will be set when creating the PVC. By default, - it is not set. This means that that the PVC we create will have the default - storage class set in Kubernetes. + class to use for the PV. This will be set when creating the PVC. By + default, it is not set. This means that that the PVC we create will have + the default storage class set in Kubernetes. displayName: Storage Class path: local.storageClass x-descriptors: - urn:alm:descriptor:io.kubernetes:StorageClass - - description: 'A secret that contains the TLS credentials to use for Vertica''s - node management agent (NMA). If this is empty, the operator will create - a secret to use and add the name of the generate secret in this field. When - set, the secret must have the following keys defined: tls.key, tls.crt and - ca.crt. To store this secret outside of Kubernetes, you can use a secret - path reference prefix, such as gsm://. Everything after the prefix is the - name of the secret in the service you are storing.' + - description: |- + A secret that contains the TLS credentials to use for Vertica's node + management agent (NMA). If this is empty, the operator will create a + secret to use and add the name of the generate secret in this field. + When set, the secret must have the following keys defined: tls.key, + tls.crt and ca.crt. To store this secret outside of Kubernetes, you can + use a secret path reference prefix, such as gsm://. Everything after the + prefix is the name of the secret in the service you are storing. displayName: NMATLSSecret path: nmaTLSSecret x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: An optional name for a secret that contains the password for - the database's superuser. If this is not set, then we assume no such password + - description: |- + An optional name for a secret that contains the password for the + database's superuser. If this is not set, then we assume no such password is set for the database. If this is set, it is up the user to create this secret before deployment. The secret must have a key named password. To - store this secret outside of Kubernetes, you can use a secret path reference - prefix, such as gsm://. Everything after the prefix is the name of the secret - in the service you are storing. + store this secret outside of Kubernetes, you can use a secret path + reference prefix, such as gsm://. Everything after the prefix is the name + of the secret in the service you are storing. 
displayName: Password Secret path: passwordSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: This can be used to override any pod-level securityContext for - the Vertica pod. It will be merged with the default context. If omitted, - then the default context is used. + - description: |- + This can be used to override any pod-level securityContext for the + Vertica pod. It will be merged with the default context. If omitted, then + the default context is used. displayName: Pod Security Context path: podSecurityContext x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Allows tuning of the Vertica pods readiness probe. Each of the - values here are applied to the default readiness probe we create. If this - is omitted, we use the default probe. + - description: |- + Create client proxy pods for the subcluster if defined + All incoming connections to the subclusters will be routed through the proxy pods + displayName: Proxy + path: proxy + - description: The docker image name that contains the Vertica proxy server. + displayName: Image + path: proxy.image + - description: |- + A secret that contains the TLS credentials to use for Vertica's client + proxy. If this is empty, the operator will create a secret to use and + add the name of the generate secret in this field. + When set, the secret must have the following keys defined: tls.key, + tls.crt and ca.crt. To store this secret outside of Kubernetes, you can + use a secret path reference prefix, such as gsm://. Everything after the + prefix is the name of the secret in the service you are storing. + displayName: TLSSecret + path: proxy.tlsSecret + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: |- + Allows tuning of the Vertica pods readiness probe. Each of the values + here are applied to the default readiness probe we create. If this is + omitted, we use the default probe. displayName: Readiness Probe Override path: readinessProbeOverride x-descriptors: @@ -661,42 +817,56 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:fieldDependency:initPolicy:Revive - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Name of the restore archive to use for bootstrapping. This name - refers to an object in the database. This must be specified if initPolicy - is Revive and a restore is intended. + - description: |- + Name of the restore archive to use for bootstrapping. + This name refers to an object in the database. + This must be specified if initPolicy is Revive and a restore is intended. displayName: Archive path: restorePoint.archive - - description: The identifier of the restore point in the restore archive to - restore from. Specify either index or id exclusively; one of these fields - is mandatory, but both cannot be used concurrently. + - description: |- + The identifier of the restore point in the restore archive to restore from. + Specify either index or id exclusively; one of these fields is mandatory, but both cannot be used concurrently. displayName: ID path: restorePoint.id - - description: The (1-based) index of the restore point in the restore archive - to restore from. Specify either index or id exclusively; one of these fields - is mandatory, but both cannot be used concurrently. + - description: |- + The (1-based) index of the restore point in the restore archive to restore from. + Specify either index or id exclusively; one of these fields is mandatory, but both cannot be used concurrently. 
displayName: Index path: restorePoint.index x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - - description: "This specifies the order of nodes when doing a revive. Each - entry contains an index to a subcluster, which is an index in Subclusters[], - and a pod count of the number of pods include from the subcluster. \n For - example, suppose the database you want to revive has the following setup: - v_db_node0001: subcluster A v_db_node0002: subcluster A v_db_node0003: subcluster - B v_db_node0004: subcluster A v_db_node0005: subcluster B v_db_node0006: - subcluster B \n And the Subcluster[] array is defined as {'A', 'B'}. The - revive order would be: - {subclusterIndex:0, podCount:2} # 2 pods from - subcluster A - {subclusterIndex:1, podCount:1} # 1 pod from subcluster - B - {subclusterIndex:0, podCount:1} # 1 pod from subcluster A - {subclusterIndex:1, - podCount:2} # 2 pods from subcluster B \n If InitPolicy is not Revive, - this field can be ignored." + - description: |- + This specifies the order of nodes when doing a revive. Each entry + contains an index to a subcluster, which is an index in Subclusters[], + and a pod count of the number of pods include from the subcluster. + + + For example, suppose the database you want to revive has the following setup: + v_db_node0001: subcluster A + v_db_node0002: subcluster A + v_db_node0003: subcluster B + v_db_node0004: subcluster A + v_db_node0005: subcluster B + v_db_node0006: subcluster B + + + And the Subcluster[] array is defined as {'A', 'B'}. The revive order + would be: + - {subclusterIndex:0, podCount:2} # 2 pods from subcluster A + - {subclusterIndex:1, podCount:1} # 1 pod from subcluster B + - {subclusterIndex:0, podCount:1} # 1 pod from subcluster A + - {subclusterIndex:1, podCount:2} # 2 pods from subcluster B + + + If InitPolicy is not Revive, this field can be ignored. displayName: Revive Order path: reviveOrder x-descriptors: - urn:alm:descriptor:com.tectonic.ui:fieldDependency:initPolicy:Revive - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The number of pods paired with this subcluster. If this is omitted - then, all remaining pods in the subcluster will be used. + - description: |- + The number of pods paired with this subcluster. If this is omitted then, + all remaining pods in the subcluster will be used. displayName: Pod Count path: reviveOrder[0].podCount x-descriptors: @@ -709,73 +879,82 @@ spec: path: sandboxes x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the image to use for the sandbox. If omitted, the - image is inherited from the spec.image field. + - description: |- + The name of the image to use for the sandbox. If omitted, the image + is inherited from the spec.image field. displayName: Image path: sandboxes[0].image - - description: This is the name of a sandbox. This is a required parameter. - This cannot change once the sandbox is created. + - description: |- + This is the name of a sandbox. This is a required parameter. This cannot + change once the sandbox is created. displayName: Name path: sandboxes[0].name - - description: State to indicate whether the operator must shut down the sandbox + - description: |- + State to indicate whether the operator must shut down the sandbox and not try to restart it. When true, stop_db will be performed on the sandbox and the operator will not try start_db on the sandbox. displayName: Shutdown path: sandboxes[0].shutdown - - description: This is the subcluster names that are part of the sandbox. 
There - must be at least one subcluster listed. All subclusters listed need to be - secondary subclusters. + - description: |- + This is the subcluster names that are part of the sandbox. + There must be at least one subcluster listed. All subclusters + listed need to be secondary subclusters. displayName: Subclusters path: sandboxes[0].subclusters - description: The name of a subcluster. displayName: Name path: sandboxes[0].subclusters[0].name - - description: Allows you to set any additional securityContext for the Vertica - server container. We merge the values with the securityContext generated - by the operator. The operator adds its own capabilities to this. If you - want those capabilities to be removed you must explicitly include them in - the drop list. + - description: |- + Allows you to set any additional securityContext for the Vertica server + container. We merge the values with the securityContext generated by the + operator. The operator adds its own capabilities to this. If you want + those capabilities to be removed you must explicitly include them in the + drop list. displayName: Security Context path: securityContext x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: The name of a serviceAccount to use to run the Vertica pods. - If the service account is not specified or does not exist, the operator - will create one, using the specified name if provided, along with a Role - and RoleBinding. + - description: |- + The name of a serviceAccount to use to run the Vertica pods. If the + service account is not specified or does not exist, the operator will + create one, using the specified name if provided, along with a Role and + RoleBinding. displayName: Service Account Name path: serviceAccountName x-descriptors: - urn:alm:descriptor:io.kubernetes:ServiceAccount - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'The number of shards to create in the database. This cannot - be updated once the CRD is created. Refer to this page to determine an - optimal size: https://www.vertica.com/docs/latest/HTML/Content/Authoring/Eon/SizingEonCluster.htm - The default was chosen using this link and the default subcluster size of - 3.' + - description: |- + The number of shards to create in the database. This cannot be updated + once the CRD is created. Refer to this page to determine an optimal size: + https://www.vertica.com/docs/latest/HTML/Content/Authoring/Eon/SizingEonCluster.htm + The default was chosen using this link and the default subcluster size of 3. displayName: Shard Count path: shardCount x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - - description: Optional sidecar containers that run along side the vertica server. The - operator adds the same volume mounts that are in the vertica server container - to each sidecar container. + - description: |- + Optional sidecar containers that run along side the vertica server. The + operator adds the same volume mounts that are in the vertica server + container to each sidecar container. displayName: Sidecars path: sidecars x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Allows tuning of the Vertica pods startup probe. Each of the - values here are applied to the default startup probe we create. If this - is omitted, we use the default probe. + - description: |- + Allows tuning of the Vertica pods startup probe. Each of the values + here are applied to the default startup probe we create. If this is + omitted, we use the default probe. 
displayName: Startup Probe Override path: startupProbeOverride x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - displayName: Subclusters path: subclusters - - description: 'Like nodeSelector this allows you to constrain the pod only - to certain pods. It is more expressive than just using node selectors. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity' + - description: |- + Like nodeSelector this allows you to constrain the pod only to certain + pods. It is more expressive than just using node selectors. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity displayName: Affinity path: subclusters[0].affinity - description: Describes node affinity scheduling rules for the pod. @@ -795,58 +974,89 @@ spec: path: subclusters[0].affinity.podAntiAffinity x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity - - description: A map of key/value pairs appended to the stateful metadata.annotations - of the subcluster. + - description: |- + A map of key/value pairs appended to the stateful metadata.annotations of + the subcluster. displayName: Annotations path: subclusters[0].annotations - - description: When setting serviceType to NodePort, this parameter allows you - to define the port that is opened at each node for Vertica client connections. - If using NodePort and this is omitted, Kubernetes will choose the port automatically. - This port must be from within the defined range allocated by the control - plane (default is 30000-32767). + - description: |- + When setting serviceType to NodePort, this parameter allows you to define the + port that is opened at each node for Vertica client connections. If using + NodePort and this is omitted, Kubernetes will choose the port + automatically. This port must be from within the defined range allocated + by the control plane (default is 30000-32767). displayName: Client Node Port path: subclusters[0].clientNodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Allows the service object to be attached to a list of external - IPs that you specify. If not set, the external IP list is left empty in - the service object. More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips' + - description: |- + Allows the service object to be attached to a list of external IPs that you + specify. If not set, the external IP list is left empty in the service object. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips displayName: External IPs path: subclusters[0].externalIPs x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: This allows a different image to be used for the subcluster than - the one in VerticaDB. This is intended to be used internally by the online - image change process. + - description: |- + This allows a different image to be used for the subcluster than the one + in VerticaDB. This is intended to be used internally by the online image + change process. displayName: Image Override path: subclusters[0].imageOverride x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: 'Specify IP address of LoadBalancer service for this subcluster. - This field is ignored when serviceType != "LoadBalancer". 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer' + - description: |- + Specify IP address of LoadBalancer service for this subcluster. + This field is ignored when serviceType != "LoadBalancer". + More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer displayName: Load Balancer IP path: subclusters[0].loadBalancerIP x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the subcluster. This is a required parameter. This - cannot change after CRD creation. + - description: |- + The name of the subcluster. This is a required parameter. This cannot + change after CRD creation. displayName: Name path: subclusters[0].name - - description: 'A map of label keys and values to restrict Vertica node scheduling - to workers with matching labels. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector' + - description: |- + A map of label keys and values to restrict Vertica node scheduling to workers + with matching labels. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector displayName: Node Selector path: subclusters[0].nodeSelector - - description: 'The priority class name given to pods in this subcluster. This - affects where the pod gets scheduled. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass' + - description: |- + The priority class name given to pods in this subcluster. This affects + where the pod gets scheduled. + More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass displayName: Priority Class Name path: subclusters[0].priorityClassName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'This defines the resource requests and limits for pods in the - subcluster. It is advisable that the request and limits match as this ensures - the pods are assigned to the guaranteed QoS class. This will reduces the - chance that pods are chosen by the OOM killer. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + - description: |- + Create client proxy pods for the subcluster if defined + All incoming connections to the subclusters will be routed through the proxy pods + displayName: Proxy + path: subclusters[0].proxy + - description: The number of replicas that the proxy server will have. + displayName: Replicas + path: subclusters[0].proxy.replicas + - description: |- + This defines the resource requests and limits for the client proxy pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + displayName: Resources + path: subclusters[0].proxy.resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: |- + This defines the resource requests and limits for pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ displayName: Resources path: subclusters[0].resources x-descriptors: @@ -854,94 +1064,112 @@ spec: - description: A map of key/value pairs appended to service metadata.annotations. displayName: Service Annotations path: subclusters[0].serviceAnnotations - - description: Identifies the name of the service object that will serve this - subcluster. If multiple subclusters share the same service name then they - all share the same service object. This allows for a single service object - to round robin between multiple subclusters. If this is left blank, a service - object matching the subcluster name is used. The actual name of the service - object is always prefixed with the name of the owning VerticaDB. + - description: |- + Identifies the name of the service object that will serve this + subcluster. If multiple subclusters share the same service name then + they all share the same service object. This allows for a single service + object to round robin between multiple subclusters. If this is left + blank, a service object matching the subcluster name is used. The actual + name of the service object is always prefixed with the name of the owning + VerticaDB. displayName: Service Name path: subclusters[0].serviceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Identifies the type of Kubernetes service to use for external - client connectivity. The default is to use a ClusterIP, which sets a stable - IP and port to use that is accessible only from within Kubernetes itself. - Depending on the service type chosen the user may need to set other config - knobs to further config it. These other knobs follow this one. More info: - https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + - description: |- + Identifies the type of Kubernetes service to use for external client + connectivity. The default is to use a ClusterIP, which sets a stable IP + and port to use that is accessible only from within Kubernetes itself. + Depending on the service type chosen the user may need to set other + config knobs to further config it. These other knobs follow this one. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types displayName: Service Type path: subclusters[0].serviceType x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:ClusterIP - urn:alm:descriptor:com.tectonic.ui:select:NodePort - urn:alm:descriptor:com.tectonic.ui:select:LoadBalancer - - description: State to indicate whether the operator must shut down the subcluster + - description: |- + State to indicate whether the operator must shut down the subcluster and not try to restart it. displayName: Shutdown path: subclusters[0].shutdown - - description: "The number of pods that the subcluster will have. This determines - the number of Vertica nodes that it will have. Changing this number will - either delete or schedule new pods. \n The database has a k-safety of 1. - So, if this is a primary subcluster, the minimum value is 3. If this is - a secondary subcluster, the minimum is 0. \n Note, you must have a valid - license to pick a value larger than 3. The default license that comes in - the vertica container is for the community edition, which can only have - 3 nodes. The license can be set with the db.licenseSecret parameter." + - description: |- + The number of pods that the subcluster will have. 
This determines the + number of Vertica nodes that it will have. Changing this number will + either delete or schedule new pods. + + + The database has a k-safety of 1. So, if this is a primary subcluster, + the minimum value is 3. If this is a secondary subcluster, the minimum is + 0. + + + Note, you must have a valid license to pick a value larger than 3. The + default license that comes in the vertica container is for the community + edition, which can only have 3 nodes. The license can be set with the + db.licenseSecret parameter. displayName: Size path: subclusters[0].size x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podCount - - description: 'Any tolerations and taints to use to aid in where to schedule - a pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/' + - description: |- + Any tolerations and taints to use to aid in where to schedule a pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ displayName: Tolerations path: subclusters[0].tolerations x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Indicates the type of subcluster it is. Valid values are: primary, - secondary or transient. Types are case-sensitive. You must have at least - one primary subcluster in the database. If type is omitted, it will default - to a primary. Transient should only be set internally by the operator during - online upgrade. It is used to indicate a subcluster that exists temporarily - to serve traffic for subclusters that are restarting with a new image.' + - description: |- + Indicates the type of subcluster it is. Valid values are: primary, + secondary or transient. Types are case-sensitive. + You must have at least one primary subcluster in the database. + If type is omitted, it will default to a primary. + Transient should only be set internally by the operator during online + upgrade. It is used to indicate a subcluster that exists temporarily to + serve traffic for subclusters that are restarting with a new image. displayName: Type path: subclusters[0].type x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:primary - urn:alm:descriptor:com.tectonic.ui:select:secondary - - description: 'Like the clientNodePort parameter, except this controls the - node port to use for the http endpoint in the Vertica server. The same - rules apply: it must be defined within the range allocated by the control - plane, if omitted Kubernetes will choose the port automatically.' + - description: |- + Like the clientNodePort parameter, except this controls the node port to use + for the http endpoint in the Vertica server. The same rules apply: it + must be defined within the range allocated by the control plane, if + omitted Kubernetes will choose the port automatically. displayName: Vertica HTTPNode Port path: subclusters[0].verticaHTTPNodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: When doing a read-only online upgrade, we designate a subcluster - to accept traffic while the other subclusters restart. The designated subcluster - is specified here. The name of the subcluster can refer to an existing - one or an entirely new subcluster. If the subcluster is new, it will exist - only for the duration of the upgrade. If this struct is left empty the - operator will default to picking existing subclusters. 
+ - description: |- + When doing a read-only online upgrade, we designate a subcluster to + accept traffic while the other subclusters restart. The designated + subcluster is specified here. The name of the subcluster can refer to an + existing one or an entirely new subcluster. If the subcluster is new, it + will exist only for the duration of the upgrade. If this struct is + left empty the operator will default to picking existing subclusters. displayName: Temporary Subcluster Routing path: temporarySubclusterRouting x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Names of existing subclusters to use for temporary routing of - client connections. The operator will use the first subcluster that is - online. + - description: |- + Names of existing subclusters to use for temporary routing of client + connections. The operator will use the first subcluster that is online. displayName: Names path: temporarySubclusterRouting.names - - description: A new subcluster will be created using this as a template. This - subcluster will only exist for the life of the online upgrade. It will - accept client traffic for a subcluster that are in the process of being - restarted. + - description: |- + A new subcluster will be created using this as a template. This + subcluster will only exist for the life of the online upgrade. It + will accept client traffic for a subcluster that are in the process of + being restarted. displayName: Template path: temporarySubclusterRouting.template - - description: 'Like nodeSelector this allows you to constrain the pod only - to certain pods. It is more expressive than just using node selectors. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity' + - description: |- + Like nodeSelector this allows you to constrain the pod only to certain + pods. It is more expressive than just using node selectors. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity displayName: Affinity path: temporarySubclusterRouting.template.affinity - description: Describes node affinity scheduling rules for the pod. @@ -961,58 +1189,89 @@ spec: path: temporarySubclusterRouting.template.affinity.podAntiAffinity x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity - - description: A map of key/value pairs appended to the stateful metadata.annotations - of the subcluster. + - description: |- + A map of key/value pairs appended to the stateful metadata.annotations of + the subcluster. displayName: Annotations path: temporarySubclusterRouting.template.annotations - - description: When setting serviceType to NodePort, this parameter allows you - to define the port that is opened at each node for Vertica client connections. - If using NodePort and this is omitted, Kubernetes will choose the port automatically. - This port must be from within the defined range allocated by the control - plane (default is 30000-32767). + - description: |- + When setting serviceType to NodePort, this parameter allows you to define the + port that is opened at each node for Vertica client connections. If using + NodePort and this is omitted, Kubernetes will choose the port + automatically. This port must be from within the defined range allocated + by the control plane (default is 30000-32767). 
displayName: Client Node Port path: temporarySubclusterRouting.template.clientNodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Allows the service object to be attached to a list of external - IPs that you specify. If not set, the external IP list is left empty in - the service object. More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips' + - description: |- + Allows the service object to be attached to a list of external IPs that you + specify. If not set, the external IP list is left empty in the service object. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips displayName: External IPs path: temporarySubclusterRouting.template.externalIPs x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: This allows a different image to be used for the subcluster than - the one in VerticaDB. This is intended to be used internally by the online - image change process. + - description: |- + This allows a different image to be used for the subcluster than the one + in VerticaDB. This is intended to be used internally by the online image + change process. displayName: Image Override path: temporarySubclusterRouting.template.imageOverride x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: 'Specify IP address of LoadBalancer service for this subcluster. - This field is ignored when serviceType != "LoadBalancer". More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer' + - description: |- + Specify IP address of LoadBalancer service for this subcluster. + This field is ignored when serviceType != "LoadBalancer". + More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer displayName: Load Balancer IP path: temporarySubclusterRouting.template.loadBalancerIP x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the subcluster. This is a required parameter. This - cannot change after CRD creation. + - description: |- + The name of the subcluster. This is a required parameter. This cannot + change after CRD creation. displayName: Name path: temporarySubclusterRouting.template.name - - description: 'A map of label keys and values to restrict Vertica node scheduling - to workers with matching labels. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector' + - description: |- + A map of label keys and values to restrict Vertica node scheduling to workers + with matching labels. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector displayName: Node Selector path: temporarySubclusterRouting.template.nodeSelector - - description: 'The priority class name given to pods in this subcluster. This - affects where the pod gets scheduled. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass' + - description: |- + The priority class name given to pods in this subcluster. This affects + where the pod gets scheduled. + More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass displayName: Priority Class Name path: temporarySubclusterRouting.template.priorityClassName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'This defines the resource requests and limits for pods in the - subcluster. 
It is advisable that the request and limits match as this ensures - the pods are assigned to the guaranteed QoS class. This will reduces the - chance that pods are chosen by the OOM killer. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + - description: |- + Create client proxy pods for the subcluster if defined + All incoming connections to the subclusters will be routed through the proxy pods + displayName: Proxy + path: temporarySubclusterRouting.template.proxy + - description: The number of replicas that the proxy server will have. + displayName: Replicas + path: temporarySubclusterRouting.template.proxy.replicas + - description: |- + This defines the resource requests and limits for the client proxy pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + displayName: Resources + path: temporarySubclusterRouting.template.proxy.resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: |- + This defines the resource requests and limits for pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ displayName: Resources path: temporarySubclusterRouting.template.resources x-descriptors: @@ -1020,82 +1279,108 @@ spec: - description: A map of key/value pairs appended to service metadata.annotations. displayName: Service Annotations path: temporarySubclusterRouting.template.serviceAnnotations - - description: Identifies the name of the service object that will serve this - subcluster. If multiple subclusters share the same service name then they - all share the same service object. This allows for a single service object - to round robin between multiple subclusters. If this is left blank, a service - object matching the subcluster name is used. The actual name of the service - object is always prefixed with the name of the owning VerticaDB. + - description: |- + Identifies the name of the service object that will serve this + subcluster. If multiple subclusters share the same service name then + they all share the same service object. This allows for a single service + object to round robin between multiple subclusters. If this is left + blank, a service object matching the subcluster name is used. The actual + name of the service object is always prefixed with the name of the owning + VerticaDB. displayName: Service Name path: temporarySubclusterRouting.template.serviceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Identifies the type of Kubernetes service to use for external - client connectivity. The default is to use a ClusterIP, which sets a stable - IP and port to use that is accessible only from within Kubernetes itself. - Depending on the service type chosen the user may need to set other config - knobs to further config it. These other knobs follow this one. 
More info: - https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + - description: |- + Identifies the type of Kubernetes service to use for external client + connectivity. The default is to use a ClusterIP, which sets a stable IP + and port to use that is accessible only from within Kubernetes itself. + Depending on the service type chosen the user may need to set other + config knobs to further config it. These other knobs follow this one. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types displayName: Service Type path: temporarySubclusterRouting.template.serviceType x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:ClusterIP - urn:alm:descriptor:com.tectonic.ui:select:NodePort - urn:alm:descriptor:com.tectonic.ui:select:LoadBalancer - - description: State to indicate whether the operator must shut down the subcluster + - description: |- + State to indicate whether the operator must shut down the subcluster and not try to restart it. displayName: Shutdown path: temporarySubclusterRouting.template.shutdown - - description: "The number of pods that the subcluster will have. This determines - the number of Vertica nodes that it will have. Changing this number will - either delete or schedule new pods. \n The database has a k-safety of 1. - So, if this is a primary subcluster, the minimum value is 3. If this is - a secondary subcluster, the minimum is 0. \n Note, you must have a valid - license to pick a value larger than 3. The default license that comes in - the vertica container is for the community edition, which can only have - 3 nodes. The license can be set with the db.licenseSecret parameter." + - description: |- + The number of pods that the subcluster will have. This determines the + number of Vertica nodes that it will have. Changing this number will + either delete or schedule new pods. + + + The database has a k-safety of 1. So, if this is a primary subcluster, + the minimum value is 3. If this is a secondary subcluster, the minimum is + 0. + + + Note, you must have a valid license to pick a value larger than 3. The + default license that comes in the vertica container is for the community + edition, which can only have 3 nodes. The license can be set with the + db.licenseSecret parameter. displayName: Size path: temporarySubclusterRouting.template.size x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podCount - - description: 'Any tolerations and taints to use to aid in where to schedule - a pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/' + - description: |- + Any tolerations and taints to use to aid in where to schedule a pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ displayName: Tolerations path: temporarySubclusterRouting.template.tolerations x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Indicates the type of subcluster it is. Valid values are: primary, - secondary or transient. Types are case-sensitive. You must have at least - one primary subcluster in the database. If type is omitted, it will default - to a primary. Transient should only be set internally by the operator during - online upgrade. It is used to indicate a subcluster that exists temporarily - to serve traffic for subclusters that are restarting with a new image.' + - description: |- + Indicates the type of subcluster it is. 
Valid values are: primary, + secondary or transient. Types are case-sensitive. + You must have at least one primary subcluster in the database. + If type is omitted, it will default to a primary. + Transient should only be set internally by the operator during online + upgrade. It is used to indicate a subcluster that exists temporarily to + serve traffic for subclusters that are restarting with a new image. displayName: Type path: temporarySubclusterRouting.template.type x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:primary - urn:alm:descriptor:com.tectonic.ui:select:secondary - - description: 'Like the clientNodePort parameter, except this controls the - node port to use for the http endpoint in the Vertica server. The same - rules apply: it must be defined within the range allocated by the control - plane, if omitted Kubernetes will choose the port automatically.' + - description: |- + Like the clientNodePort parameter, except this controls the node port to use + for the http endpoint in the Vertica server. The same rules apply: it + must be defined within the range allocated by the control plane, if + omitted Kubernetes will choose the port automatically. displayName: Vertica HTTPNode Port path: temporarySubclusterRouting.template.verticaHTTPNodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: "This setting defines how the upgrade process will be managed. - The available values are Offline, ReadOnlyOnline, Online, and Auto. \n Offline: - This option involves taking down the entire cluster and then bringing it - back up with the new image. \n ReadOnlyOnline: With this option, the cluster - remains operational for reads during the upgrade process. However, the data - will be in read-only mode until the Vertica nodes from the primary subcluster - re-form the cluster with the new image. \n Online: Similar to Online, this - option keeps the cluster operational throughout the upgrade process but - allows writes. The cluster is split into two replicas, and traffic is redirected - to the active replica to facilitate writes. \n Auto: This option selects - one of the above methods automatically based on compatibility with the version - of Vertica you are running." + - description: |- + This setting defines how the upgrade process will be managed. The + available values are Offline, ReadOnlyOnline, Online, and Auto. + + + Offline: This option involves taking down the entire cluster and then + bringing it back up with the new image. + + + ReadOnlyOnline: With this option, the cluster remains operational for reads + during the upgrade process. However, the data will be in read-only mode + until the Vertica nodes from the primary subcluster re-form the cluster + with the new image. + + + Online: Similar to Online, this option keeps the cluster operational + throughout the upgrade process but allows writes. The cluster is split + into two replicas, and traffic is redirected to the active replica to + facilitate writes. + + + Auto: This option selects one of the above methods automatically based on + compatibility with the version of Vertica you are running. displayName: Upgrade Policy path: upgradePolicy x-descriptors: @@ -1103,19 +1388,24 @@ spec: - urn:alm:descriptor:com.tectonic.ui:select:ReadOnlyOnline - urn:alm:descriptor:com.tectonic.ui:select:Online - urn:alm:descriptor:com.tectonic.ui:select:Offline - - description: Additional volume mounts to include in the Vertica container. 
These + - description: |- + Additional volume mounts to include in the Vertica container. These reference volumes that are in the Volumes list. The mount path must not conflict with a mount path that the operator adds internally. displayName: Volume Mounts path: volumeMounts x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: "Custom volumes that are added to sidecars and the Vertica container. - For these volumes to be visible in either container, they must have a corresonding - volumeMounts entry. For sidecars, this is included in `spec.sidecars[*].volumeMounts`. - \ For the Vertica container, it is included in `spec.volumeMounts`. \n This - accepts any valid volume type. A unique name must be given for each volume - and it cannot conflict with any of the internally generated volumes." + - description: |- + Custom volumes that are added to sidecars and the Vertica container. + For these volumes to be visible in either container, they must have a + corresponding volumeMounts entry. For sidecars, this is included in + `spec.sidecars[*].volumeMounts`. For the Vertica container, it is + included in `spec.volumeMounts`. + + + This accepts any valid volume type. A unique name must be given for each + volume and it cannot conflict with any of the internally generated volumes. displayName: Volumes path: volumes x-descriptors: @@ -1143,18 +1433,21 @@ spec: - description: Name of the sandbox that was defined in the spec displayName: Name path: sandboxes[0].name - - description: The names of subclusters that are currently a part of the given - sandbox. This is updated as subclusters become sandboxed or unsandboxed. + - description: |- + The names of subclusters that are currently a part of the given sandbox. + This is updated as subclusters become sandboxed or unsandboxed. displayName: Subclusters path: sandboxes[0].subclusters - description: State of the current running upgrade in the sandbox displayName: Upgrade State path: sandboxes[0].upgradeState - - description: UpgradeInProgress indicates if the sandbox is in the process + - description: |- + UpgradeInProgress indicates if the sandbox is in the process of having its image change displayName: Upgrade In Progress path: sandboxes[0].upgradeState.upgradeInProgress - - description: Status message for the current running upgrade. If no upgrade + - description: |- + Status message for the current running upgrade. If no upgrade is occurring, this message remains blank. displayName: Upgrade Status path: sandboxes[0].upgradeState.upgradeStatus @@ -1177,8 +1470,9 @@ spec: - description: This is set to true if /opt/vertica/config has been bootstrapped. displayName: Installed path: subclusters[0].detail[0].installed - - description: True means the vertica process is running on this pod and it - can accept connections on port 5433. + - description: |- + True means the vertica process is running on this pod and it can accept + connections on port 5433. displayName: Up Node path: subclusters[0].detail[0].upNode - description: This is the vnode name that Vertica internally assigned this @@ -1191,8 +1485,9 @@ spec: - description: Object ID of the subcluster. displayName: Oid path: subclusters[0].oid - - description: State of the subcluster. true means the subcluster was explicitly - shut down by the user and must not be restarted. + - description: |- + State of the subcluster. true means the subcluster was explicitly shut down by the user + and must not be restarted.
displayName: Shutdown path: subclusters[0].shutdown - description: A count of the number of pods that have a running vertica process @@ -1202,7 +1497,8 @@ spec: - description: A count of the number of pods that have a running vertica process. displayName: Up Node Count path: upNodeCount - - description: Status message for the current running upgrade. If no upgrade + - description: |- + Status message for the current running upgrade. If no upgrade is occurring, this message remains blank. displayName: Upgrade Status path: upgradeStatus @@ -1223,28 +1519,34 @@ spec: name: "" version: apps/v1 specDescriptors: - - description: Custom annotations that will be added to all of the objects that - the operator will create. + - description: |- + Custom annotations that will be added to all of the objects that the + operator will create. displayName: Annotations path: annotations - - description: State to indicate whether the operator will restart Vertica if - the process is not running. Under normal cicumstances this is set to true. - The purpose of this is to allow a maintenance window, such as a manual upgrade, - without the operator interfering. + - description: |- + State to indicate whether the operator will restart Vertica if the + process is not running. Under normal circumstances this is set to true. + The purpose of this is to allow a maintenance window, such as a + manual upgrade, without the operator interfering. displayName: Auto Restart Vertica path: autoRestartVertica x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Secrets that will be mounted in the vertica container. The - purpose of this is to allow custom certs to be available. The full path - is: /certs// Where is the name provided - in the secret and is one of the keys in the secret.' + - description: |- + Secrets that will be mounted in the vertica container. The purpose of + this is to allow custom certs to be available. The full path is: + /certs// + Where is the name provided in the secret and is one + of the keys in the secret. displayName: Cert Secrets path: certSecrets x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + - description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names displayName: Name path: certSecrets[0].name x-descriptors: @@ -1252,47 +1554,51 @@ spec: - description: Contains details about the communal storage. displayName: Communal path: communal - - description: Contains a map of server configuration parameters. To avoid duplicate - values, if a parameter is already set through another CR field, (like S3ServerSideEncryption - through communal.s3ServerSideEncryption), the corresponding key/value pair - is skipped. If a config value is set that isn't supported by the server - version you are running, the server will fail to start. These are set only - during initial bootstrap. After the database has been initialized, changing - the options in the CR will have no affect in the server. + - description: |- + Contains a map of server configuration parameters. + To avoid duplicate values, if a parameter is already set through another CR field, + (like S3ServerSideEncryption through communal.s3ServerSideEncryption), the corresponding + key/value pair is skipped.
If a config value is set that isn't supported by the server version + you are running, the server will fail to start. These are set only during initial bootstrap. After + the database has been initialized, changing the options in the CR will have no effect on the server. displayName: Additional Config path: communal.additionalConfig - - description: The absolute path to a certificate bundle of trusted CAs. This - CA bundle is used when establishing TLS connections to external services - such as AWS, Azure or swebhdf:// scheme. Typically this would refer to - a path to one of the certSecrets. + - description: |- + The absolute path to a certificate bundle of trusted CAs. This CA bundle + is used when establishing TLS connections to external services such as + AWS, Azure or swebhdfs:// scheme. Typically this would refer to a path to + one of the certSecrets. displayName: Ca File path: communal.caFile - description: "The name of a secret that contains the credentials to connect - to the communal endpoint (only applies to s3://, gs:// or azb://). Certain - keys need to be set, depending on the endpoint type: - s3:// or gs:// - - If storing credentials in a secret, the secret must have the following keys - set: accesskey and secretkey. When using Google Cloud Storage, the IDs - set in the secret are taken from the hash-based message authentication code - (HMAC) keys. - azb:// - It must have the following keys set: accountName - - Name of the Azure account blobEndpoint - (Optional) Set this to the location - of the endpoint. If using an emulator like Azurite, it can be set to something - like 'http://:' accountKey - If accessing with an account - key set it here sharedAccessSignature - If accessing with a shared access - signature, set it here \n This field is optional. For AWS, authentication - to communal storage can be provided through an attached IAM profile: attached - to the EC2 instance or to a ServiceAccount with IRSA (see https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). - IRSA requires a Vertica server running at least with version >= 12.0.3." + to the\ncommunal endpoint (only applies to s3://, gs:// or azb://). Certain + keys\nneed to be set, depending on the endpoint type:\n- s3:// or gs:// + - If storing credentials in a secret, the secret must\n have the following + keys set: accesskey and secretkey. When using\n Google Cloud Storage, + the IDs set in the secret are taken\n from the hash-based message authentication + code (HMAC) keys.\n- azb:// - It must have the following keys set:\n accountName + - Name of the Azure account\n blobEndpoint - (Optional) Set this to the + location of the endpoint.\n If using an emulator like Azurite, it can + be set to something like\n 'http://:'\n accountKey + - If accessing with an account key set it here\n sharedAccessSignature + - If accessing with a shared access signature,\n \t set it here\n\n\nThis + field is optional. For AWS, authentication to communal storage can\nbe provided + through an attached IAM profile: attached to the EC2 instance\nor to a ServiceAccount + with IRSA (see\nhttps://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html).\nIRSA + requires a Vertica server running at least with version >= 12.0.3." displayName: Credential Secret path: communal.credentialSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: The URL to the communal endpoint. The endpoint must be prefaced - with http:// or https:// to know what protocol to connect with.
If using - S3 or Google Cloud Storage as communal storage and initPolicy is Create - or Revive, this field is required and cannot change after creation. + - description: |- + The URL to the communal endpoint. The endpoint must be prefaced with http:// or + https:// to know what protocol to connect with. If using S3 or Google + Cloud Storage as communal storage and initPolicy is Create or Revive, + this field is required and cannot change after creation. displayName: Endpoint path: communal.endpoint - - description: A config map that contains the contents of the /etc/hadoop directory. + - description: |- + A config map that contains the contents of the /etc/hadoop directory. This gets mounted in the container and is used to configure connections to an HDFS communal path displayName: Hadoop Config @@ -1300,57 +1606,62 @@ spec: x-descriptors: - urn:alm:descriptor:io.kubernetes:ConfigMap - urn:alm:descriptor:com.tectonic.ui:advanced - - description: If true, the operator will include the VerticaDB's UID in the - path. This option exists if you reuse the communal path in the same endpoint - as it forces each database path to be unique. + - description: |- + If true, the operator will include the VerticaDB's UID in the path. This + option exists if you reuse the communal path in the same endpoint as it + forces each database path to be unique. displayName: Include UIDIn Path path: communal.includeUIDInPath x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Name of the Kerberos realm. This is set in the database config - parameter KerberosRealm during bootstrapping. + - description: |- + Name of the Kerberos realm. This is set in the database config parameter + KerberosRealm during bootstrapping. displayName: Kerberos Realm path: communal.kerberosRealm x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The service name portion of the Vertica Kerberos principal. This - is set in the database config parameter KerberosServiceName during bootstrapping. + - description: |- + The service name portion of the Vertica Kerberos principal. This is set + in the database config parameter KerberosServiceName during bootstrapping. displayName: Kerberos Service Name path: communal.kerberosServiceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The path to the communal storage. We support S3, Google Cloud - Storage, and HDFS paths. The protocol in the path (e.g. s3:// or webhdfs://) - dictates the type of storage. The path, whether it be a S3 bucket or HDFS - path, must exist prior to creating the VerticaDB. When initPolicy is Create, - this field is required and the path must be empty. When initPolicy is Revive, - this field is required and must be non-empty. + - description: |- + The path to the communal storage. We support S3, Google Cloud Storage, + and HDFS paths. The protocol in the path (e.g. s3:// or webhdfs://) + dictates the type of storage. The path, whether it be a S3 bucket or + HDFS path, must exist prior to creating the VerticaDB. When initPolicy + is Create, this field is required and the path must be empty. When + initPolicy is Revive, this field is required and must be non-empty. displayName: Path path: communal.path - - description: The region containing the bucket. If you do not set the correct + - description: |- + The region containing the bucket. 
If you do not set the correct region, you might experience a delay before the bootstrap fails because Vertica retries several times before giving up. displayName: Region path: communal.region - - description: 'The server-side encryption type Vertica will use to read/write - from encrypted S3 communal storage. Available values are: SSE-S3, SSE-KMS, - SSE-C and empty string (""). - SSE-S3: the S3 service manages encryption - keys. - SSE-KMS: encryption keys are managed by the Key Management Service - (KMS). KMS key identifier must be supplied through communal.additionalConfig - map. - SSE-C: the client manages encryption keys and provides them to S3 - for each operation. The client key must be supplied through communal.s3SseCustomerKeySecret. - - Empty string (""): No encryption. This is the default value. This value - cannot change after the initial creation of the VerticaDB.' + - description: "The server-side encryption type Vertica will use to read/write + from encrypted S3 communal storage.\nAvailable values are: SSE-S3, SSE-KMS, + SSE-C and empty string (\"\").\n- SSE-S3: the S3 service manages encryption + keys.\n- SSE-KMS: encryption keys are managed by the Key Management Service + (KMS).\n\t KMS key identifier must be supplied through communal.additionalConfig + map.\n- SSE-C: the client manages encryption keys and provides them to S3 + for each operation.\n\t The client key must be supplied through communal.s3SseCustomerKeySecret.\n- + Empty string (\"\"): No encryption. This is the default value.\nThis value + cannot change after the initial creation of the VerticaDB." displayName: S3 Server Side Encryption path: communal.s3ServerSideEncryption x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:SSE-S3 - urn:alm:descriptor:com.tectonic.ui:select:SSE-KMS - urn:alm:descriptor:com.tectonic.ui:select:SSE-C - - description: The name of a secret that contains the key to use for the S3SseCustomerKey - config option in the server. It is required when S3ServerSideEncryption - is SSE-C. When set, the secret must have a key named clientKey. + - description: |- + The name of a secret that contains the key to use for the S3SseCustomerKey config option in the server. + It is required when S3ServerSideEncryption is SSE-C. When set, the secret must have a key named clientKey. displayName: S3 Sse Customer Key Secret path: communal.s3SseCustomerKeySecret x-descriptors: @@ -1361,51 +1672,59 @@ spec: path: dbName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Controls if the spread communication between pods is encrypted. Valid - values are 'vertica' or an empty string if not enabled. When set to 'vertica', - Vertica generates the spread encryption key for the cluster when the database - starts up. This can only be set during initial creation of the CR. If - set for initPolicy other than Create, then it has no effect. + - description: |- + Controls if the spread communication between pods is encrypted. Valid + values are 'vertica' or an empty string if not enabled. When set to + 'vertica', Vertica generates the spread encryption key for the cluster + when the database starts up. This can only be set during initial + creation of the CR. If set for initPolicy other than Create, then it has + no effect. displayName: Encrypt Spread Comm path: encryptSpreadComm x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Deprecated: setup of TLS certs for http access is controlled - by the deployment type now.' 
+ - description: |- + Deprecated: setup of TLS certs for http access is controlled by the + deployment type now. displayName: Deprecated HTTPServer Mode path: httpServerMode x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: 'A secret that contains the TLS credentials to use for the node - management agent. If this is empty, the operator will create a secret to - use and add the name of the generate secret in this field. When set, the - secret must have the following keys defined: - tls.key: The private key - to be used by the HTTP server - tls.crt: The signed certificate chain for - the private key - ca.crt: The CA certificate' + - description: |- + A secret that contains the TLS credentials to use for the node management + agent. If this is empty, the operator will create a secret to use and + add the name of the generate secret in this field. When set, the secret + must have the following keys defined: + - tls.key: The private key to be used by the HTTP server + - tls.crt: The signed certificate chain for the private key + - ca.crt: The CA certificate displayName: HTTPServer TLSSecret path: httpServerTLSSecret x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Ignore the cluster lease when doing a revive or start_db. Use - this with caution, as ignoring the cluster lease when another system is - using the same communal storage will cause corruption. + - description: |- + Ignore the cluster lease when doing a revive or start_db. Use this with + caution, as ignoring the cluster lease when another system is using the + same communal storage will cause corruption. displayName: Ignore Cluster Lease path: ignoreClusterLease x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - urn:alm:descriptor:com.tectonic.ui:advanced - - description: When set to False, this parameter will ensure that when changing - the vertica version that we follow the upgrade path. The Vertica upgrade + - description: |- + When set to False, this parameter will ensure that when changing the + vertica version that we follow the upgrade path. The Vertica upgrade path means you cannot downgrade a Vertica release, nor can you skip any released Vertica versions when upgrading. displayName: Ignore Upgrade Path path: ignoreUpgradePath x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: The docker image name that contains the Vertica server. Whenever - this changes, the operator treats this as an upgrade. The upgrade can be - done either in an online or offline fashion. See the upgradePolicy to understand - how to control the behavior. + - description: |- + The docker image name that contains the Vertica server. Whenever this + changes, the operator treats this as an upgrade. The upgrade can be done + either in an online or offline fashion. See the upgradePolicy to + understand how to control the behavior. displayName: Image path: image - description: This dictates the image pull policy to use @@ -1413,19 +1732,24 @@ spec: path: imagePullPolicy x-descriptors: - urn:alm:descriptor:com.tectonic.ui:imagePullPolicy - - description: 'ImagePullSecrets is an optional list of references to secrets - in the same namespace to use for pulling the image. If specified, these - secrets will be passed to individual puller implementations for them to - use. For example, in the case of docker, only DockerConfig type secrets - are honored. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + - description: |- + ImagePullSecrets is an optional list of references to secrets in the same + namespace to use for pulling the image. If specified, these secrets will + be passed to individual puller implementations for them to use. For + example, in the case of docker, only DockerConfig type secrets are + honored. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod displayName: Image Pull Secrets path: imagePullSecrets - - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + - description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names displayName: Name path: imagePullSecrets[0].name x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: The initialization policy defines how to setup the database. Available + - description: |- + The initialization policy defines how to set up the database. Available options are to create a new database or revive an existing one. displayName: Init Policy path: initPolicy @@ -1433,46 +1757,54 @@ spec: - urn:alm:descriptor:com.tectonic.ui:select:Create - urn:alm:descriptor:com.tectonic.ui:select:Revive - urn:alm:descriptor:com.tectonic.ui:select:ScheduleOnly - - description: "Sets the fault tolerance for the cluster. Allowable values - are 0 or 1. 0 is only suitable for test environments because we have no - fault tolerance and the cluster can only have between 1 and 3 pods. If - set to 1, we have fault tolerance if nodes die and the cluster has a minimum - of 3 pods. \n This value cannot change after the initial creation of the - VerticaDB." + - description: |- + Sets the fault tolerance for the cluster. Allowable values are 0 or 1. 0 is only + suitable for test environments because we have no fault tolerance and the cluster + can only have between 1 and 3 pods. If set to 1, we have fault tolerance if nodes + die and the cluster has a minimum of 3 pods. + + + This value cannot change after the initial creation of the VerticaDB. displayName: KSafety path: kSafety x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:0 - urn:alm:descriptor:com.tectonic.ui:select:1 - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'A secret that contains files required for Kereberos setup. The - secret must have the following keys: - krb5.conf: The contents of the Kerberos - config file - krb5.keytab: The keytab file that stores credentials for each - Vertica principal. These files will be mounted in /etc. We use the same - keytab file on each host, so it must contain all of the Vertica principals.' + - description: |- + A secret that contains files required for Kerberos setup. The secret + must have the following keys: + - krb5.conf: The contents of the Kerberos config file + - krb5.keytab: The keytab file that stores credentials for each Vertica principal. + These files will be mounted in /etc. We use the same keytab file on each + host, so it must contain all of the Vertica principals. displayName: Kerberos Secret path: kerberosSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Custom labels that will be added to all of the objects that the - operator will create.
+ - description: |- + Custom labels that will be added to all of the objects that the operator + will create. displayName: Labels path: labels - - description: The name of a secret that contains the contents of license files. - The secret must be in the same namespace as the CRD. Each of the keys in - the secret will be mounted as files in /home/dbadmin/licensing/mnt. If this + - description: |- + The name of a secret that contains the contents of license files. The + secret must be in the same namespace as the CRD. Each of the keys in the + secret will be mounted as files in /home/dbadmin/licensing/mnt. If this is set prior to creating a database, it will include one of the licenses - from the secret -- if there are multiple licenses it will pick one by selecting - the first one alphabetically. The user is responsible for installing any - additional licenses or if the license was added to the secret after DB creation. + from the secret -- if there are multiple licenses it will pick one by + selecting the first one alphabetically. The user is responsible for + installing any additional licenses or if the license was added to the + secret after DB creation. displayName: License Secret path: licenseSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: Allows tuning of the Vertica pods liveness probe. Each of the - values here are applied to the default liveness probe we create. If this - is omitted, we use the default probe. + - description: |- + Allows tuning of the Vertica pods liveness probe. Each of the values + here are applied to the default liveness probe we create. If this is + omitted, we use the default probe. displayName: Liveness Probe Override path: livenessProbeOverride x-descriptors: @@ -1480,65 +1812,94 @@ spec: - description: Contain details about the local storage displayName: Local path: local - - description: The path in the container to the catalog. When initializing - the database with revive, this path must match the catalog path used when - the database was first created. For backwards compatibility, if this is - omitted, then it shares the same path as the dataPath. + - description: |- + The path in the container to the catalog. When initializing the database with + revive, this path must match the catalog path used when the database was + first created. For backwards compatibility, if this is omitted, then it + shares the same path as the dataPath. displayName: Catalog Path path: local.catalogPath - - description: The path in the container to the local directory for the 'DATA,TEMP' + - description: |- + The path in the container to the local directory for the 'DATA,TEMP' storage location usage. When initializing the database with revive, the local path here must match the path that was used when the database was first created. displayName: Data Path path: local.dataPath - - description: The path in the container to the depot -- 'DEPOT' storage location - usage. When initializing the database with revive, this path must match - the depot path used when the database was first created. + - description: |- + The path in the container to the depot -- 'DEPOT' storage location usage. + When initializing the database with revive, this path must match the + depot path used when the database was first created. displayName: Depot Path path: local.depotPath - - description: 'The type of volume to use for the depot. Allowable values will - be: EmptyDir and PersistentVolume or an empty string. An empty string currently - defaults to PersistentVolume.' 
+ - description: |- + The type of volume to use for the depot. + Allowable values will be: EmptyDir and PersistentVolume or an empty string. + An empty string currently defaults to PersistentVolume. displayName: Depot Volume path: local.depotVolume x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:PersistentVolume - urn:alm:descriptor:com.tectonic.ui:select:EmptyDir - - description: The minimum size of the local data volume when picking a PV. If - changing this after the PV have been created, two things may happen. First, - it will cause a resize of the PV to the new size. And, if depot is stored - in the PV, a resize of the depot happens too. + - description: |- + The minimum size of the local data volume when picking a PV. If changing + this after the PV has been created, two things may happen. First, it + will cause a resize of the PV to the new size. And, if depot is + stored in the PV, a resize of the depot happens too. displayName: Request Size path: local.requestSize - - description: The local data stores the local catalog, depot and config files. - Portions of the local data are persisted with a persistent volume (PV) using - a persistent volume claim (PVC). The catalog and config files are always + - description: |- + The local data stores the local catalog, depot and config files. Portions + of the local data are persisted with a persistent volume (PV) using a + persistent volume claim (PVC). The catalog and config files are always stored in the PV. The depot may be included too if depotVolume is set to 'PersistentVolume'. This field is used to define the name of the storage - class to use for the PV. This will be set when creating the PVC. By default, - it is not set. This means that that the PVC we create will have the default - storage class set in Kubernetes. + class to use for the PV. This will be set when creating the PVC. By + default, it is not set. This means that the PVC we create will have + the default storage class set in Kubernetes. displayName: Storage Class path: local.storageClass x-descriptors: - urn:alm:descriptor:io.kubernetes:StorageClass - - description: This can be used to override any pod-level securityContext for - the Vertica pod. It will be merged with the default context. If omitted, - then the default context is used. + - description: |- + This can be used to override any pod-level securityContext for the + Vertica pod. It will be merged with the default context. If omitted, then + the default context is used. displayName: Pod Security Context path: podSecurityContext x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Allows tuning of the Vertica pods readiness probe. Each of the - values here are applied to the default readiness probe we create. If this - is omitted, we use the default probe. + - description: |- + Create client proxy pods for the subcluster if defined. + All incoming connections to the subclusters will be routed through the proxy pods. + displayName: Proxy + path: proxy + - description: The docker image name that contains the Vertica proxy server. + displayName: Image + path: proxy.image + - description: |- + A secret that contains the TLS credentials to use for Vertica's client + proxy. If this is empty, the operator will create a secret to use and + add the name of the generated secret in this field. + When set, the secret must have the following keys defined: tls.key, + tls.crt and ca.crt. To store this secret outside of Kubernetes, you can + use a secret path reference prefix, such as gsm://.
Everything after the + prefix is the name of the secret in the service you are storing. + displayName: TLSSecret + path: proxy.tlsSecret + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: |- + Allows tuning of the Vertica pods readiness probe. Each of the values + here are applied to the default readiness probe we create. If this is + omitted, we use the default probe. displayName: Readiness Probe Override path: readinessProbeOverride x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: If a reconciliation iteration needs to be requeued this controls - the amount of time in seconds to wait. If this is set to 0, then the requeue + - description: |- + If a reconciliation iteration needs to be requeued this controls the + amount of time in seconds to wait. If this is set to 0, then the requeue time will increase using an exponential backoff algorithm. Caution, when setting this to some positive value the exponential backoff is disabled. This should be reserved for test environments as an error scenario could @@ -1548,8 +1909,9 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The timeout, in seconds, to use when admintools restarts a node - or the entire cluster. If omitted, we use the admintools default timeout + - description: |- + The timeout, in seconds, to use when admintools restarts a node or the + entire cluster. If omitted, we use the admintools default timeout of 20 minutes. displayName: Restart Timeout path: restartTimeout @@ -1563,42 +1925,56 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:fieldDependency:initPolicy:Revive - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Name of the restore archive to use for bootstrapping. This name - refers to an object in the database. This must be specified if initPolicy - is Revive and a restore is intended. + - description: |- + Name of the restore archive to use for bootstrapping. + This name refers to an object in the database. + This must be specified if initPolicy is Revive and a restore is intended. displayName: Archive path: restorePoint.archive - - description: The identifier of the restore point in the restore archive to - restore from. Specify either index or id exclusively; one of these fields - is mandatory, but both cannot be used concurrently. + - description: |- + The identifier of the restore point in the restore archive to restore from. + Specify either index or id exclusively; one of these fields is mandatory, but both cannot be used concurrently. displayName: ID path: restorePoint.id - - description: The (1-based) index of the restore point in the restore archive - to restore from. Specify either index or id exclusively; one of these fields - is mandatory, but both cannot be used concurrently. + - description: |- + The (1-based) index of the restore point in the restore archive to restore from. + Specify either index or id exclusively; one of these fields is mandatory, but both cannot be used concurrently. displayName: Index path: restorePoint.index x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - - description: "This specifies the order of nodes when doing a revive. Each - entry contains an index to a subcluster, which is an index in Subclusters[], - and a pod count of the number of pods include from the subcluster. 
\n For - example, suppose the database you want to revive has the following setup: - v_db_node0001: subcluster A v_db_node0002: subcluster A v_db_node0003: subcluster - B v_db_node0004: subcluster A v_db_node0005: subcluster B v_db_node0006: - subcluster B \n And the Subcluster[] array is defined as {'A', 'B'}. The - revive order would be: - {subclusterIndex:0, podCount:2} # 2 pods from - subcluster A - {subclusterIndex:1, podCount:1} # 1 pod from subcluster - B - {subclusterIndex:0, podCount:1} # 1 pod from subcluster A - {subclusterIndex:1, - podCount:2} # 2 pods from subcluster B \n If InitPolicy is not Revive, - this field can be ignored." + - description: |- + This specifies the order of nodes when doing a revive. Each entry + contains an index to a subcluster, which is an index in Subclusters[], + and a pod count of the number of pods included from the subcluster. + + + For example, suppose the database you want to revive has the following setup: + v_db_node0001: subcluster A + v_db_node0002: subcluster A + v_db_node0003: subcluster B + v_db_node0004: subcluster A + v_db_node0005: subcluster B + v_db_node0006: subcluster B + + + And the Subcluster[] array is defined as {'A', 'B'}. The revive order + would be: + - {subclusterIndex:0, podCount:2} # 2 pods from subcluster A + - {subclusterIndex:1, podCount:1} # 1 pod from subcluster B + - {subclusterIndex:0, podCount:1} # 1 pod from subcluster A + - {subclusterIndex:1, podCount:2} # 2 pods from subcluster B + + + If InitPolicy is not Revive, this field can be ignored. displayName: Revive Order path: reviveOrder x-descriptors: - urn:alm:descriptor:com.tectonic.ui:fieldDependency:initPolicy:Revive - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The number of pods paired with this subcluster. If this is omitted - then, all remaining pods in the subcluster will be used. + - description: |- + The number of pods paired with this subcluster. If this is omitted, then + all remaining pods in the subcluster will be used. displayName: Pod Count path: reviveOrder[0].podCount x-descriptors: @@ -1611,83 +1987,93 @@ spec: path: sandboxes x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the image to use for the sandbox. If omitted, the - image is inherited from the spec.image field. + - description: |- + The name of the image to use for the sandbox. If omitted, the image + is inherited from the spec.image field. displayName: Image path: sandboxes[0].image - - description: This is the name of a sandbox. This is a required parameter. - This cannot change once the sandbox is created. + - description: |- + This is the name of a sandbox. This is a required parameter. This cannot + change once the sandbox is created. displayName: Name path: sandboxes[0].name - - description: State to indicate whether the operator must shut down the sandbox + - description: |- + State to indicate whether the operator must shut down the sandbox and not try to restart it. When true, stop_db will be performed on the sandbox and the operator will not try start_db on the sandbox. displayName: Shutdown path: sandboxes[0].shutdown - - description: This is the subcluster names that are part of the sandbox. There - must be at least one subcluster listed. All subclusters listed need to be - secondary subclusters. + - description: |- + These are the subcluster names that are part of the sandbox. + There must be at least one subcluster listed. All subclusters + listed need to be secondary subclusters.
displayName: Subclusters path: sandboxes[0].subclusters - description: The name of a subcluster. displayName: Name path: sandboxes[0].subclusters[0].name - - description: Allows you to set any additional securityContext for the Vertica - server container. We merge the values with the securityContext generated - by the operator. The operator adds its own capabilities to this. If you - want those capabilities to be removed you must explicitly include them in - the drop list. + - description: |- + Allows you to set any additional securityContext for the Vertica server + container. We merge the values with the securityContext generated by the + operator. The operator adds its own capabilities to this. If you want + those capabilities to be removed you must explicitly include them in the + drop list. displayName: Security Context path: securityContext x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: The name of a serviceAccount to use to run the Vertica pods. - If the service account is not specified or does not exist, the operator - will create one, using the specified name if provided, along with a Role - and RoleBinding. + - description: |- + The name of a serviceAccount to use to run the Vertica pods. If the + service account is not specified or does not exist, the operator will + create one, using the specified name if provided, along with a Role and + RoleBinding. displayName: Service Account Name path: serviceAccountName x-descriptors: - urn:alm:descriptor:io.kubernetes:ServiceAccount - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'The number of shards to create in the database. This cannot - be updated once the CRD is created. Refer to this page to determine an - optimal size: https://www.vertica.com/docs/latest/HTML/Content/Authoring/Eon/SizingEonCluster.htm - The default was chosen using this link and the default subcluster size of - 3.' + - description: |- + The number of shards to create in the database. This cannot be updated + once the CRD is created. Refer to this page to determine an optimal size: + https://www.vertica.com/docs/latest/HTML/Content/Authoring/Eon/SizingEonCluster.htm + The default was chosen using this link and the default subcluster size of 3. displayName: Shard Count path: shardCount x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - - description: Optional sidecar containers that run along side the vertica server. The - operator adds the same volume mounts that are in the vertica server container - to each sidecar container. + - description: |- + Optional sidecar containers that run alongside the vertica server. The + operator adds the same volume mounts that are in the vertica server + container to each sidecar container. displayName: Sidecars path: sidecars x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'An optional secret that has the files for /home/dbadmin/.ssh. If - this is omitted, the ssh files from the image are used. You can this option - if you have a cluster that talks to Vertica notes outside of Kubernetes, - as it has the public keys to be able to ssh to those nodes. It must have - the following keys present: id_rsa, id_rsa.pub and authorized_keys.' + - description: |- + An optional secret that has the files for /home/dbadmin/.ssh. If this is + omitted, the ssh files from the image are used. You can use this option if + you have a cluster that talks to Vertica nodes outside of Kubernetes, as + it has the public keys to be able to ssh to those nodes.
It must have + the following keys present: id_rsa, id_rsa.pub and authorized_keys. displayName: SSHSecret path: sshSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Allows tuning of the Vertica pods startup probe. Each of the - values here are applied to the default startup probe we create. If this - is omitted, we use the default probe. + - description: |- + Allows tuning of the Vertica pods startup probe. Each of the values + here are applied to the default startup probe we create. If this is + omitted, we use the default probe. displayName: Startup Probe Override path: startupProbeOverride x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - displayName: Subclusters path: subclusters - - description: 'Like nodeSelector this allows you to constrain the pod only - to certain pods. It is more expressive than just using node selectors. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity' + - description: |- + Like nodeSelector this allows you to constrain the pod only to certain + pods. It is more expressive than just using node selectors. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity displayName: Affinity path: subclusters[0].affinity - description: Describes node affinity scheduling rules for the pod. @@ -1707,79 +2093,114 @@ spec: path: subclusters[0].affinity.podAntiAffinity x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity - - description: A map of key/value pairs appended to the stateful metadata.annotations - of the subcluster. + - description: |- + A map of key/value pairs appended to the stateful metadata.annotations of + the subcluster. displayName: Annotations path: subclusters[0].annotations - - description: 'Allows the service object to be attached to a list of external - IPs that you specify. If not set, the external IP list is left empty in - the service object. More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips' + - description: |- + Allows the service object to be attached to a list of external IPs that you + specify. If not set, the external IP list is left empty in the service object. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips displayName: External IPs path: subclusters[0].externalIPs x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: This allows a different image to be used for the subcluster than - the one in VerticaDB. This is intended to be used internally by the online - image change process. + - description: |- + This allows a different image to be used for the subcluster than the one + in VerticaDB. This is intended to be used internally by the online image + change process. displayName: Image Override path: subclusters[0].imageOverride x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Indicates whether the subcluster is a primary or secondary. You - must have at least one primary subcluster in the database. + - description: |- + Indicates whether the subcluster is a primary or secondary. You must have + at least one primary subcluster in the database. displayName: Is Primary path: subclusters[0].isPrimary x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - - description: A sandbox primary subcluster is a secondary subcluster that was - the first subcluster in a sandbox. 
These subclusters are primaries when - they are sandboxed. When unsandboxed, they will go back to being just a - secondary subcluster + - description: |- + A sandbox primary subcluster is a secondary subcluster that was the first + subcluster in a sandbox. These subclusters are primaries when they are + sandboxed. When unsandboxed, they will go back to being just a secondary + subcluster displayName: Is Sandbox Primary path: subclusters[0].isSandboxPrimary x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Internal state that indicates whether this is a transient read-only - subcluster used for online upgrade. A subcluster that exists temporarily - to serve traffic for subclusters that are restarting with the new image. + - description: |- + Internal state that indicates whether this is a transient read-only + subcluster used for online upgrade. A subcluster that exists + temporarily to serve traffic for subclusters that are restarting with the + new image. displayName: Is Transient path: subclusters[0].isTransient x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: 'Specify IP address of LoadBalancer service for this subcluster. - This field is ignored when serviceType != "LoadBalancer". More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer' + - description: |- + Specify IP address of LoadBalancer service for this subcluster. + This field is ignored when serviceType != "LoadBalancer". + More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer displayName: Load Balancer IP path: subclusters[0].loadBalancerIP x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the subcluster. This is a required parameter. This - cannot change after CRD creation. + - description: |- + The name of the subcluster. This is a required parameter. This cannot + change after CRD creation. displayName: Name path: subclusters[0].name - - description: When setting serviceType to NodePort, this parameter allows you - to define the port that is opened at each node for Vertica client connections. - If using NodePort and this is omitted, Kubernetes will choose the port automatically. - This port must be from within the defined range allocated by the control - plane (default is 30000-32767). + - description: |- + When setting serviceType to NodePort, this parameter allows you to define the + port that is opened at each node for Vertica client connections. If using + NodePort and this is omitted, Kubernetes will choose the port + automatically. This port must be from within the defined range allocated + by the control plane (default is 30000-32767). displayName: Node Port path: subclusters[0].nodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'A map of label keys and values to restrict Vertica node scheduling - to workers with matching labels. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector' + - description: |- + A map of label keys and values to restrict Vertica node scheduling to workers + with matching labels. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector displayName: Node Selector path: subclusters[0].nodeSelector - - description: 'The priority class name given to pods in this subcluster. This - affects where the pod gets scheduled. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass' + - description: |- + The priority class name given to pods in this subcluster. This affects + where the pod gets scheduled. + More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass displayName: Priority Class Name path: subclusters[0].priorityClassName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'This defines the resource requests and limits for pods in the - subcluster. It is advisable that the request and limits match as this ensures - the pods are assigned to the guaranteed QoS class. This will reduces the - chance that pods are chosen by the OOM killer. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + - description: |- + Create client proxy pods for the subcluster if defined. + All incoming connections to the subclusters will be routed through the proxy pods. + displayName: Proxy + path: subclusters[0].proxy + - description: The number of replicas that the proxy server will have. + displayName: Replicas + path: subclusters[0].proxy.replicas + - description: |- + This defines the resource requests and limits for the client proxy pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + displayName: Resources + path: subclusters[0].proxy.resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: |- + This defines the resource requests and limits for pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ displayName: Resources path: subclusters[0].resources x-descriptors: @@ -1787,91 +2208,108 @@ spec: - description: A map of key/value pairs appended to service metadata.annotations. displayName: Service Annotations path: subclusters[0].serviceAnnotations - - description: Identifies the name of the service object that will serve this - subcluster. If multiple subclusters share the same service name then they - all share the same service object. This allows for a single service object - to round robin between multiple subclusters. If this is left blank, a service - object matching the subcluster name is used. The actual name of the service - object is always prefixed with the name of the owning VerticaDB. + - description: |- + Identifies the name of the service object that will serve this + subcluster. If multiple subclusters share the same service name then + they all share the same service object. This allows for a single service + object to round robin between multiple subclusters. If this is left + blank, a service object matching the subcluster name is used. The actual + name of the service object is always prefixed with the name of the owning + VerticaDB. displayName: Service Name path: subclusters[0].serviceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Identifies the type of Kubernetes service to use for external - client connectivity.
The default is to use a ClusterIP, which sets a stable - IP and port to use that is accessible only from within Kubernetes itself. - Depending on the service type chosen the user may need to set other config - knobs to further config it. These other knobs follow this one. More info: - https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + - description: |- + Identifies the type of Kubernetes service to use for external client + connectivity. The default is to use a ClusterIP, which sets a stable IP + and port to use that is accessible only from within Kubernetes itself. + Depending on the service type chosen the user may need to set other + config knobs to further config it. These other knobs follow this one. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types displayName: Service Type path: subclusters[0].serviceType x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:ClusterIP - urn:alm:descriptor:com.tectonic.ui:select:NodePort - urn:alm:descriptor:com.tectonic.ui:select:LoadBalancer - - description: State to indicate whether the operator must shut down the subcluster + - description: |- + State to indicate whether the operator must shut down the subcluster and not try to restart it. displayName: Shutdown path: subclusters[0].shutdown - - description: "The number of pods that the subcluster will have. This determines - the number of Vertica nodes that it will have. Changing this number will - either delete or schedule new pods. \n The database has a k-safety of 1. - So, if this is a primary subcluster, the minimum value is 3. If this is - a secondary subcluster, the minimum is 0. \n Note, you must have a valid - license to pick a value larger than 3. The default license that comes in - the vertica container is for the community edition, which can only have - 3 nodes. The license can be set with the db.licenseSecret parameter." + - description: |- + The number of pods that the subcluster will have. This determines the + number of Vertica nodes that it will have. Changing this number will + either delete or schedule new pods. + + + The database has a k-safety of 1. So, if this is a primary subcluster, + the minimum value is 3. If this is a secondary subcluster, the minimum is + 0. + + + Note, you must have a valid license to pick a value larger than 3. The + default license that comes in the vertica container is for the community + edition, which can only have 3 nodes. The license can be set with the + db.licenseSecret parameter. displayName: Size path: subclusters[0].size x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podCount - - description: 'Any tolerations and taints to use to aid in where to schedule - a pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/' + - description: |- + Any tolerations and taints to use to aid in where to schedule a pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ displayName: Tolerations path: subclusters[0].tolerations x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Like the nodePort parameter, except this controls the node port - to use for the http endpoint in the Vertica server. The same rules apply: - it must be defined within the range allocated by the control plane, if omitted - Kubernetes will choose the port automatically.' 
+ - description: |- + Like the nodePort parameter, except this controls the node port to use + for the http endpoint in the Vertica server. The same rules apply: it + must be defined within the range allocated by the control plane, if + omitted Kubernetes will choose the port automatically. displayName: Vertica HTTPNode Port path: subclusters[0].verticaHTTPNodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: An optional name for a secret that contains the password for - the database's superuser. If this is not set, then we assume no such password + - description: |- + An optional name for a secret that contains the password for the + database's superuser. If this is not set, then we assume no such password is set for the database. If this is set, it is up the user to create this secret before deployment. The secret must have a key named password. displayName: Superuser Password Secret path: superuserPasswordSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: When doing an online upgrade, we designate a subcluster to accept - traffic while the other subclusters restart. The designated subcluster - is specified here. The name of the subcluster can refer to an existing - one or an entirely new subcluster. If the subcluster is new, it will exist - only for the duration of the upgrade. If this struct is left empty the - operator will default to picking existing subclusters. + - description: |- + When doing an online upgrade, we designate a subcluster to + accept traffic while the other subclusters restart. The designated + subcluster is specified here. The name of the subcluster can refer to an + existing one or an entirely new subcluster. If the subcluster is new, it + will exist only for the duration of the upgrade. If this struct is + left empty the operator will default to picking existing subclusters. displayName: Temporary Subcluster Routing path: temporarySubclusterRouting x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Names of existing subclusters to use for temporary routing of - client connections. The operator will use the first subcluster that is - online. + - description: |- + Names of existing subclusters to use for temporary routing of client + connections. The operator will use the first subcluster that is online. displayName: Names path: temporarySubclusterRouting.names - - description: A new subcluster will be created using this as a template. This - subcluster will only exist for the life of the online upgrade. It will - accept client traffic for a subcluster that are in the process of being - restarted. + - description: |- + A new subcluster will be created using this as a template. This + subcluster will only exist for the life of the online upgrade. It + will accept client traffic for a subcluster that are in the process of + being restarted. displayName: Template path: temporarySubclusterRouting.template - - description: 'Like nodeSelector this allows you to constrain the pod only - to certain pods. It is more expressive than just using node selectors. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity' + - description: |- + Like nodeSelector this allows you to constrain the pod only to certain + pods. It is more expressive than just using node selectors. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity displayName: Affinity path: temporarySubclusterRouting.template.affinity - description: Describes node affinity scheduling rules for the pod. @@ -1891,79 +2329,114 @@ spec: path: temporarySubclusterRouting.template.affinity.podAntiAffinity x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity - - description: A map of key/value pairs appended to the stateful metadata.annotations - of the subcluster. + - description: |- + A map of key/value pairs appended to the stateful metadata.annotations of + the subcluster. displayName: Annotations path: temporarySubclusterRouting.template.annotations - - description: 'Allows the service object to be attached to a list of external - IPs that you specify. If not set, the external IP list is left empty in - the service object. More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips' + - description: |- + Allows the service object to be attached to a list of external IPs that you + specify. If not set, the external IP list is left empty in the service object. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips displayName: External IPs path: temporarySubclusterRouting.template.externalIPs x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: This allows a different image to be used for the subcluster than - the one in VerticaDB. This is intended to be used internally by the online - image change process. + - description: |- + This allows a different image to be used for the subcluster than the one + in VerticaDB. This is intended to be used internally by the online image + change process. displayName: Image Override path: temporarySubclusterRouting.template.imageOverride x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Indicates whether the subcluster is a primary or secondary. You - must have at least one primary subcluster in the database. + - description: |- + Indicates whether the subcluster is a primary or secondary. You must have + at least one primary subcluster in the database. displayName: Is Primary path: temporarySubclusterRouting.template.isPrimary x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - - description: A sandbox primary subcluster is a secondary subcluster that was - the first subcluster in a sandbox. These subclusters are primaries when - they are sandboxed. When unsandboxed, they will go back to being just a - secondary subcluster + - description: |- + A sandbox primary subcluster is a secondary subcluster that was the first + subcluster in a sandbox. These subclusters are primaries when they are + sandboxed. When unsandboxed, they will go back to being just a secondary + subcluster displayName: Is Sandbox Primary path: temporarySubclusterRouting.template.isSandboxPrimary x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: Internal state that indicates whether this is a transient read-only - subcluster used for online upgrade. A subcluster that exists temporarily - to serve traffic for subclusters that are restarting with the new image. + - description: |- + Internal state that indicates whether this is a transient read-only + subcluster used for online upgrade. A subcluster that exists + temporarily to serve traffic for subclusters that are restarting with the + new image. 
displayName: Is Transient path: temporarySubclusterRouting.template.isTransient x-descriptors: - urn:alm:descriptor:com.tectonic.ui:hidden - - description: 'Specify IP address of LoadBalancer service for this subcluster. - This field is ignored when serviceType != "LoadBalancer". More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer' + - description: |- + Specify IP address of LoadBalancer service for this subcluster. + This field is ignored when serviceType != "LoadBalancer". + More info: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer displayName: Load Balancer IP path: temporarySubclusterRouting.template.loadBalancerIP x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the subcluster. This is a required parameter. This - cannot change after CRD creation. + - description: |- + The name of the subcluster. This is a required parameter. This cannot + change after CRD creation. displayName: Name path: temporarySubclusterRouting.template.name - - description: When setting serviceType to NodePort, this parameter allows you - to define the port that is opened at each node for Vertica client connections. - If using NodePort and this is omitted, Kubernetes will choose the port automatically. - This port must be from within the defined range allocated by the control - plane (default is 30000-32767). + - description: |- + When setting serviceType to NodePort, this parameter allows you to define the + port that is opened at each node for Vertica client connections. If using + NodePort and this is omitted, Kubernetes will choose the port + automatically. This port must be from within the defined range allocated + by the control plane (default is 30000-32767). displayName: Node Port path: temporarySubclusterRouting.template.nodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'A map of label keys and values to restrict Vertica node scheduling - to workers with matching labels. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector' + - description: |- + A map of label keys and values to restrict Vertica node scheduling to workers + with matching labels. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector displayName: Node Selector path: temporarySubclusterRouting.template.nodeSelector - - description: 'The priority class name given to pods in this subcluster. This - affects where the pod gets scheduled. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass' + - description: |- + The priority class name given to pods in this subcluster. This affects + where the pod gets scheduled. + More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass displayName: Priority Class Name path: temporarySubclusterRouting.template.priorityClassName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'This defines the resource requests and limits for pods in the - subcluster. It is advisable that the request and limits match as this ensures - the pods are assigned to the guaranteed QoS class. This will reduces the - chance that pods are chosen by the OOM killer. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + - description: |- + Create client proxy pods for the subcluster if defined + All incoming connections to the subclusters will be routed through the proxy pods + displayName: Proxy + path: temporarySubclusterRouting.template.proxy + - description: The number of replicas that the proxy server will have. + displayName: Replicas + path: temporarySubclusterRouting.template.proxy.replicas + - description: |- + This defines the resource requests and limits for the client proxy pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + displayName: Resources + path: temporarySubclusterRouting.template.proxy.resources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: |- + This defines the resource requests and limits for pods in the subcluster. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ displayName: Resources path: temporarySubclusterRouting.template.resources x-descriptors: @@ -1971,96 +2444,116 @@ spec: - description: A map of key/value pairs appended to service metadata.annotations. displayName: Service Annotations path: temporarySubclusterRouting.template.serviceAnnotations - - description: Identifies the name of the service object that will serve this - subcluster. If multiple subclusters share the same service name then they - all share the same service object. This allows for a single service object - to round robin between multiple subclusters. If this is left blank, a service - object matching the subcluster name is used. The actual name of the service - object is always prefixed with the name of the owning VerticaDB. + - description: |- + Identifies the name of the service object that will serve this + subcluster. If multiple subclusters share the same service name then + they all share the same service object. This allows for a single service + object to round robin between multiple subclusters. If this is left + blank, a service object matching the subcluster name is used. The actual + name of the service object is always prefixed with the name of the owning + VerticaDB. displayName: Service Name path: temporarySubclusterRouting.template.serviceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Identifies the type of Kubernetes service to use for external - client connectivity. The default is to use a ClusterIP, which sets a stable - IP and port to use that is accessible only from within Kubernetes itself. - Depending on the service type chosen the user may need to set other config - knobs to further config it. These other knobs follow this one. More info: - https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + - description: |- + Identifies the type of Kubernetes service to use for external client + connectivity. The default is to use a ClusterIP, which sets a stable IP + and port to use that is accessible only from within Kubernetes itself. 
+ Depending on the service type chosen the user may need to set other + config knobs to further config it. These other knobs follow this one. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types displayName: Service Type path: temporarySubclusterRouting.template.serviceType x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:ClusterIP - urn:alm:descriptor:com.tectonic.ui:select:NodePort - urn:alm:descriptor:com.tectonic.ui:select:LoadBalancer - - description: State to indicate whether the operator must shut down the subcluster + - description: |- + State to indicate whether the operator must shut down the subcluster and not try to restart it. displayName: Shutdown path: temporarySubclusterRouting.template.shutdown - - description: "The number of pods that the subcluster will have. This determines - the number of Vertica nodes that it will have. Changing this number will - either delete or schedule new pods. \n The database has a k-safety of 1. - So, if this is a primary subcluster, the minimum value is 3. If this is - a secondary subcluster, the minimum is 0. \n Note, you must have a valid - license to pick a value larger than 3. The default license that comes in - the vertica container is for the community edition, which can only have - 3 nodes. The license can be set with the db.licenseSecret parameter." + - description: |- + The number of pods that the subcluster will have. This determines the + number of Vertica nodes that it will have. Changing this number will + either delete or schedule new pods. + + + The database has a k-safety of 1. So, if this is a primary subcluster, + the minimum value is 3. If this is a secondary subcluster, the minimum is + 0. + + + Note, you must have a valid license to pick a value larger than 3. The + default license that comes in the vertica container is for the community + edition, which can only have 3 nodes. The license can be set with the + db.licenseSecret parameter. displayName: Size path: temporarySubclusterRouting.template.size x-descriptors: - urn:alm:descriptor:com.tectonic.ui:podCount - - description: 'Any tolerations and taints to use to aid in where to schedule - a pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/' + - description: |- + Any tolerations and taints to use to aid in where to schedule a pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ displayName: Tolerations path: temporarySubclusterRouting.template.tolerations x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Like the nodePort parameter, except this controls the node port - to use for the http endpoint in the Vertica server. The same rules apply: - it must be defined within the range allocated by the control plane, if omitted - Kubernetes will choose the port automatically.' + - description: |- + Like the nodePort parameter, except this controls the node port to use + for the http endpoint in the Vertica server. The same rules apply: it + must be defined within the range allocated by the control plane, if + omitted Kubernetes will choose the port automatically. displayName: Vertica HTTPNode Port path: temporarySubclusterRouting.template.verticaHTTPNodePort x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'Defines how upgrade will be managed. Available values are: - Offline, Online and Auto. 
- Offline: means we take down the entire cluster - then bring it back up with the new image. - Online: will keep the cluster - up when the upgrade occurs. The data will go into read-only mode until - the Vertica nodes from the primary subcluster reform the cluster with the - new image. - Auto: will pick between Offline or Online. Online is only - chosen if a license Secret exists, the k-Safety of the database is 1 and - we are running with a Vertica version that supports read-only subclusters.' + - description: |- + Defines how upgrade will be managed. Available values are: Offline, + Online and Auto. + - Offline: means we take down the entire cluster then bring it back up + with the new image. + - Online: will keep the cluster up when the upgrade occurs. The + data will go into read-only mode until the Vertica nodes from the primary + subcluster reform the cluster with the new image. + - Auto: will pick between Offline or Online. Online is only chosen if a + license Secret exists, the k-Safety of the database is 1 and we are + running with a Vertica version that supports read-only subclusters. displayName: Upgrade Policy path: upgradePolicy x-descriptors: - urn:alm:descriptor:com.tectonic.ui:select:Auto - urn:alm:descriptor:com.tectonic.ui:select:Online - urn:alm:descriptor:com.tectonic.ui:select:Offline - - description: If a reconciliation iteration during an operation such as Upgrade - needs to be requeued, this controls the amount of time in seconds to delay - adding the key to the reconcile queue. If RequeueTime is set, it overrides - this value. If RequeueTime is not set either, then we set the default value - only for upgrades. For other reconciles we use the exponential backoff algorithm. + - description: |- + If a reconciliation iteration during an operation such as Upgrade needs to be requeued, this controls the + amount of time in seconds to delay adding the key to the reconcile queue. If RequeueTime is set, it overrides this value. + If RequeueTime is not set either, then we set the default value only for upgrades. For other reconciles we use the exponential backoff algorithm. displayName: Upgrade Requeue Time path: upgradeRequeueTime x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:advanced - - description: Additional volume mounts to include in the Vertica container. These + - description: |- + Additional volume mounts to include in the Vertica container. These reference volumes that are in the Volumes list. The mount path must not conflict with a mount path that the operator adds internally. displayName: Volume Mounts path: volumeMounts x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: "Custom volumes that are added to sidecars and the Vertica container. - For these volumes to be visible in either container, they must have a corresonding - volumeMounts entry. For sidecars, this is included in `spec.sidecars[*].volumeMounts`. - \ For the Vertica container, it is included in `spec.volumeMounts`. \n This - accepts any valid volume type. A unique name must be given for each volume - and it cannot conflict with any of the internally generated volumes." + - description: |- + Custom volumes that are added to sidecars and the Vertica container. + For these volumes to be visible in either container, they must have a + corresonding volumeMounts entry. For sidecars, this is included in + `spec.sidecars[*].volumeMounts`. For the Vertica container, it is + included in `spec.volumeMounts`. 
+ + + This accepts any valid volume type. A unique name must be given for each + volume and it cannot conflict with any of the internally generated volumes. displayName: Volumes path: volumes x-descriptors: @@ -2075,7 +2568,9 @@ spec: - description: Last time the condition transitioned from one status to another. displayName: Last Transition Time path: conditions[0].lastTransitionTime - - description: Status is the status of the condition can be True, False or Unknown + - description: |- + Status is the status of the condition + can be True, False or Unknown displayName: Status path: conditions[0].status - description: Type is the name of the condition @@ -2101,18 +2596,21 @@ spec: - description: Name of the sandbox that was defined in the spec displayName: Name path: sandboxes[0].name - - description: The names of subclusters that are currently a part of the given - sandbox. This is updated as subclusters become sandboxed or unsandboxed. + - description: |- + The names of subclusters that are currently a part of the given sandbox. + This is updated as subclusters become sandboxed or unsandboxed. displayName: Subclusters path: sandboxes[0].subclusters - description: State of the current running upgrade in the sandbox displayName: Upgrade State path: sandboxes[0].upgradeState - - description: UpgradeInProgress indicates if the sandbox is in the process + - description: |- + UpgradeInProgress indicates if the sandbox is in the process of having its image change displayName: Upgrade In Progress path: sandboxes[0].upgradeState.upgradeInProgress - - description: Status message for the current running upgrade. If no upgrade + - description: |- + Status message for the current running upgrade. If no upgrade is occurring, this message remains blank. displayName: Upgrade Status path: sandboxes[0].upgradeState.upgradeStatus @@ -2138,8 +2636,9 @@ spec: - description: True means the vertica process on this pod is in read-only state displayName: Read Only path: subclusters[0].detail[0].readOnly - - description: True means the vertica process is running on this pod and it - can accept connections on port 5433. + - description: |- + True means the vertica process is running on this pod and it can accept + connections on port 5433. displayName: Up Node path: subclusters[0].detail[0].upNode - description: This is the vnode name that Vertica internally assigned this @@ -2160,8 +2659,9 @@ spec: this subcluster. displayName: Read Only Count path: subclusters[0].readOnlyCount - - description: State of the subcluster. true means the subcluster was explicitly - shut down by the user and must not be restarted. + - description: |- + State of the subcluster. true means the subcluster was explicitly shut down by the user + and must not be restarted. displayName: Shutdown path: subclusters[0].shutdown - description: A count of the number of pods that have a running vertica process @@ -2171,7 +2671,8 @@ spec: - description: A count of the number of pods that have a running vertica process. displayName: Up Node Count path: upNodeCount - - description: Status message for the current running upgrade. If no upgrade + - description: |- + Status message for the current running upgrade. If no upgrade is occurring, this message remains blank. 
displayName: Upgrade Status path: upgradeStatus @@ -2192,52 +2693,59 @@ spec: - description: Information of the source Vertica database to replicate from displayName: Source path: source - - description: A string containing a wildcard pattern of the schemas and/or - tables to exclude from the set of tables matched by the include pattern. - Namespace names must be front-qualified with a period. + - description: |- + A string containing a wildcard pattern of the schemas and/or tables to exclude from the set of tables matched + by the include pattern. Namespace names must be front-qualified with a period. displayName: Exclude Pattern path: source.excludePattern x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: A string containing a wildcard pattern of the schemas and/or - tables to include in the replication. Namespace names must be front-qualified - with a period. + - description: |- + A string containing a wildcard pattern of the schemas and/or tables to include in the replication. + Namespace names must be front-qualified with a period. displayName: Include Pattern path: source.includePattern x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: 'The object name we want to copy from the source side. The available - types are: namespace, schema, table. If this is omitted, the operator will - replicate all namespaces in the source database.' + - description: |- + The object name we want to copy from the source side. The available types are: namespace, schema, table. + If this is omitted, the operator will replicate all namespaces in the source database. displayName: Object Name path: source.objectName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: "The password secret for the given UserName is stored in this - field. If this field and UserName are omitted, the default is set to the - superuser password secret found in the VerticaDB. An empty value indicates - the absence of a password. \n The secret is assumed to be a Kubernetes (k8s) - secret unless a secret path reference is specified. In the latter case, - the secret is retrieved from an external secret storage manager." + - description: |- + The password secret for the given UserName is stored in this field. If + this field and UserName are omitted, the default is set to the superuser + password secret found in the VerticaDB. An empty value indicates the + absence of a password. + + + The secret is assumed to be a Kubernetes (k8s) secret unless a secret + path reference is specified. In the latter case, the secret is retrieved + from an external secret storage manager. displayName: Password Secret path: source.passwordSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: Specify the sandbox name to establish a connection. If no sandbox - name is provided, the system assumes the main cluster of the database. + - description: |- + Specify the sandbox name to establish a connection. If no sandbox name is + provided, the system assumes the main cluster of the database. displayName: Sandbox Name path: source.sandboxName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: This field allows you to specify the name of the service object - that will be used to connect to the database. If you do not specify a name, - the service object for the first primary subcluster will be used. + - description: |- + This field allows you to specify the name of the service object that will + be used to connect to the database. 
If you do not specify a name, the + service object for the first primary subcluster will be used. displayName: Service Name path: source.serviceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: The username to connect to Vertica with. If no username is specified, - the database's superuser will be assumed. Custom username for source database + - description: |- + The username to connect to Vertica with. If no username is specified, the + database's superuser will be assumed. Custom username for source database is not supported yet. displayName: User Name path: source.userName @@ -2251,42 +2759,48 @@ spec: - description: Information of the target Vertica database to replicate to displayName: Target path: target - - description: Namespace in the target database to which objects are replicated. - The target namespace must have the same shard count as the source namespace - in the source cluster. If you do not specify a target namespace, objects - are replicated to a namespace with the same name as the source namespace. - If no such namespace exists in the target cluster, it is created with the - same name and shard count as the source namespace. You can only replicate - tables in the public schema to the default_namespace in the target cluster. + - description: |- + Namespace in the target database to which objects are replicated. The target namespace must have the same shard + count as the source namespace in the source cluster. If you do not specify a target namespace, objects are + replicated to a namespace with the same name as the source namespace. If no such namespace exists in the target + cluster, it is created with the same name and shard count as the source namespace. You can only replicate tables + in the public schema to the default_namespace in the target cluster. displayName: Namespace path: target.namespace x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: "The password secret for the given UserName is stored in this - field. If this field and UserName are omitted, the default is set to the - superuser password secret found in the VerticaDB. An empty value indicates - the absence of a password. \n The secret is assumed to be a Kubernetes (k8s) - secret unless a secret path reference is specified. In the latter case, - the secret is retrieved from an external secret storage manager." + - description: |- + The password secret for the given UserName is stored in this field. If + this field and UserName are omitted, the default is set to the superuser + password secret found in the VerticaDB. An empty value indicates the + absence of a password. + + + The secret is assumed to be a Kubernetes (k8s) secret unless a secret + path reference is specified. In the latter case, the secret is retrieved + from an external secret storage manager. displayName: Password Secret path: target.passwordSecret x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - description: Specify the sandbox name to establish a connection. If no sandbox - name is provided, the system assumes the main cluster of the database. + - description: |- + Specify the sandbox name to establish a connection. If no sandbox name is + provided, the system assumes the main cluster of the database. displayName: Sandbox Name path: target.sandboxName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: This field allows you to specify the name of the service object - that will be used to connect to the database. 
If you do not specify a name, - the service object for the first primary subcluster will be used. + - description: |- + This field allows you to specify the name of the service object that will + be used to connect to the database. If you do not specify a name, the + service object for the first primary subcluster will be used. displayName: Service Name path: target.serviceName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: The username to connect to Vertica with. If no username is specified, - the database's superuser will be assumed. Custom username for source database + - description: |- + The username to connect to Vertica with. If no username is specified, the + database's superuser will be assumed. Custom username for source database is not supported yet. displayName: User Name path: target.userName @@ -2297,12 +2811,14 @@ spec: path: target.verticaDB x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: Optional TLS configuration to use when connecting from the source - database to the target database. It refers to an existing TLS config that - already exists in the source. Using TLS configuration for target database - authentication requires the same username to be used for both source and - target databases. It also requires security config parameter EnableConnectCredentialForwarding - to be enabled on the source database. Custom username for source and target + - description: |- + Optional TLS configuration to use when connecting from the source + database to the target database. + It refers to an existing TLS config that already exists in the source. + Using TLS configuration for target database authentication requires the + same username to be used for both source and target databases. It also + requires security config parameter EnableConnectCredentialForwarding to + be enabled on the source database. Custom username for source and target databases is not supported yet when TLS configuration is used. displayName: TLSConfig path: tlsConfig @@ -2341,27 +2857,25 @@ spec: path: filterOptions.archiveName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: Optional parameter that will limit the query to only restore - points created at this timestamp or before timestamp; the timestamp can - be of date time format or date only format, e.g. "2006-01-02", "2006-01-02 - 15:04:05", "2006-01-02 15:04:05.000000000"; the timestamp is interpreted - as in UTC timezone + - description: |- + Optional parameter that will limit the query to only restore points created at this timestamp or before timestamp; + the timestamp can be of date time format or date only format, e.g. "2006-01-02", "2006-01-02 15:04:05", "2006-01-02 15:04:05.000000000"; + the timestamp is interpreted as in UTC timezone displayName: End Timestamp path: filterOptions.endTimestamp x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: Optional parameter that will limit the query to only restore - points created at this timestamp or after this timestamp; the timestamp - can be of date time format or date only format, e.g. "2006-01-02", "2006-01-02 - 15:04:05", "2006-01-02 15:04:05.000000000"; the timestamp is interpreted - as in UTC timezone + - description: |- + Optional parameter that will limit the query to only restore points created at this timestamp or after this timestamp; + the timestamp can be of date time format or date only format, e.g. 
"2006-01-02", "2006-01-02 15:04:05", "2006-01-02 15:04:05.000000000"; + the timestamp is interpreted as in UTC timezone displayName: Start Timestamp path: filterOptions.startTimestamp x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: The name of the VerticaDB CR that this VerticaRestorePointsQuery - is defined for. The VerticaDB object must exist in the same namespace as - this object. + - description: |- + The name of the VerticaDB CR that this VerticaRestorePointsQuery is defined for. The + VerticaDB object must exist in the same namespace as this object. displayName: Vertica DBName path: verticaDBName x-descriptors: @@ -2370,9 +2884,9 @@ spec: - description: Conditions for VerticaRestorePointsQuery displayName: Conditions path: conditions - - description: This contains the result of the restore points query. Check the - QueryComplete status condition to know when this has been populated by the - operator. + - description: |- + This contains the result of the restore points query. Check the QueryComplete + status condition to know when this has been populated by the operator. displayName: Restore Points path: restorePoints - description: Status message for running query @@ -2391,9 +2905,10 @@ spec: name: "" version: vertica.com/v1beta1 specDescriptors: - - description: 'Like nodeSelector this allows you to constrain the pod only - to certain pods. It is more expressive than just using node selectors. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity' + - description: |- + Like nodeSelector this allows you to constrain the pod only to certain + pods. It is more expressive than just using node selectors. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity displayName: Affinity path: affinity - description: Describes node affinity scheduling rules for the pod. @@ -2416,10 +2931,10 @@ spec: - description: A list of annotations that will be added to the scrutinize pod. displayName: Annotations path: annotations - - description: A list of additional init containers to run. These are run after - running the init container that collects the scrutinize command. These can - be used to do some kind of post-processing of the tarball, such as uploading - it to some kind of storage. + - description: |- + A list of additional init containers to run. These are run after running the init container that collects + the scrutinize command. These can be used to do some kind of post-processing of the tarball, such as uploading it + to some kind of storage. displayName: Init Containers path: initContainers x-descriptors: @@ -2427,40 +2942,48 @@ spec: - description: A list of labels that will be added to the scrutinize pod. displayName: Labels path: labels - - description: 'A map of label keys and values to restrict scrutinize node scheduling - to workers with matching labels. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector' + - description: |- + A map of label keys and values to restrict scrutinize node scheduling to workers + with matching labels. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector displayName: Node Selector path: nodeSelector - - description: 'The priority class name given to the scrutinize. This affects - where the pod gets scheduled. 
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass' + - description: |- + The priority class name given to the scrutinize. This affects + where the pod gets scheduled. + More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass displayName: Priority Class Name path: priorityClassName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: 'This defines the resource requests and limits for the scrutinize - pod. It is advisable that the request and limits match as this ensures the - pods are assigned to the guaranteed QoS class. This will reduces the chance - that pods are chosen by the OOM killer. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + - description: |- + This defines the resource requests and limits for the scrutinize pod. + It is advisable that the request and limits match as this ensures the + pods are assigned to the guaranteed QoS class. This will reduces the + chance that pods are chosen by the OOM killer. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ displayName: Resources path: resources x-descriptors: - urn:alm:descriptor:com.tectonic.ui:resourceRequirements - - description: 'Any tolerations and taints to use to aid in where to schedule - a pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/' + - description: |- + Any tolerations and taints to use to aid in where to schedule a pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ displayName: Tolerations path: tolerations x-descriptors: - urn:alm:descriptor:com.tectonic.ui:advanced - - description: The name of the VerticaDB CR that this VerticaScrutinize is defined - for. The VerticaDB object must exist in the same namespace as this object. + - description: |- + The name of the VerticaDB CR that this VerticaScrutinize is defined for. The + VerticaDB object must exist in the same namespace as this object. displayName: Vertica DBName path: verticaDBName x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - - description: This allows the user to select the volume to use for the final - destination of the scrutinize tarball and any intermediate files. The volume - must have enough space to store the scrutinize data. If this is omitted, - then a simple emptyDir volume is created to store the scrutinize data. + - description: |- + This allows the user to select the volume to use for the final destination of the scrutinize tarball and + any intermediate files. The volume must have enough space to store the scrutinize data. + If this is omitted, then a simple emptyDir volume is created to store the scrutinize data. displayName: Volume path: volume statusDescriptors: diff --git a/pkg/opcfg/config.go b/pkg/opcfg/config.go index 4a1c4ce72..fc8b28d11 100644 --- a/pkg/opcfg/config.go +++ b/pkg/opcfg/config.go @@ -38,7 +38,15 @@ func GetIsWebhookEnabled() bool { // GetBroadcasterBurstSize returns the customizable burst size for broadcaster. 
func GetBroadcasterBurstSize() int { - burstSize := lookupIntEnvVar("BROADCASTER_BURST_SIZE", envCanNotExist) + envName := "BROADCASTER_BURST_SIZE" + burstSizeStr := lookupStringEnvVar(envName, envCanNotExist) + if burstSizeStr == "" { + return defaultBurstSize + } + burstSize, err := strconv.Atoi(burstSizeStr) + if err != nil { + dieIfNotValid(envName) + } if burstSize > defaultBurstSize { return burstSize } diff --git a/scripts/gen-csv.sh b/scripts/gen-csv.sh index 8ab3af5cc..ece350bd6 100755 --- a/scripts/gen-csv.sh +++ b/scripts/gen-csv.sh @@ -84,6 +84,10 @@ $KUSTOMIZE build config/overlays/csv | $OPERATOR_SDK generate bundle $BUNDLE_GEN perl -i -0777 -pe "s/CREATED_AT_PLACEHOLDER/$(date +"%FT%H:%M:%SZ")/g" bundle/manifests/verticadb-operator.clusterserviceversion.yaml perl -i -0777 -pe "s+OPERATOR_IMG_PLACEHOLDER+$(make echo-images | grep OPERATOR_IMG | cut -d'=' -f2)+g" bundle/manifests/verticadb-operator.clusterserviceversion.yaml +# Remove custom mounted certs +perl -i -0777 -pe 's/\n\s*- name: auth-cert\s*\n\s*secret:\s*\n\s*secretName: custom-cert//g' bundle/manifests/verticadb-operator.clusterserviceversion.yaml +perl -i -0777 -pe 's/\n\s*- mountPath: \/cert\s*\n\s*name: auth-cert//g' bundle/manifests/verticadb-operator.clusterserviceversion.yaml + # Delete the ServiceMonitor object from the bundle. This puts a # requirement on having the Prometheus Operator installed. We are only # optionally installing this. We will include the manifest in our GitHub diff --git a/scripts/setup-olm.sh b/scripts/setup-olm.sh index a5b7aeff7..b6f1e1111 100755 --- a/scripts/setup-olm.sh +++ b/scripts/setup-olm.sh @@ -82,7 +82,7 @@ if ! $SCRIPT_DIR/is-openshift.sh then if ! kubectl get -n $OLM_NS deployment olm-operator then - $OPERATOR_SDK olm install --version 0.26.0 + $OPERATOR_SDK olm install --version 0.31.0 # Delete the default catalog that OLM ships with to avoid a lot of duplicates entries. kubectl delete catalogsource operatorhubio-catalog -n $OLM_NS || true diff --git a/scripts/template-helm-chart.sh b/scripts/template-helm-chart.sh index 717682168..137c574b3 100755 --- a/scripts/template-helm-chart.sh +++ b/scripts/template-helm-chart.sh @@ -153,8 +153,8 @@ perl -i -0777 -pe 's/verticadb-operator/{{ include "vdb-op.name" . }}/g' $TEMPLA for f in $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml do perl -i -0777 -pe 's/(.*--v=[0-9]+)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - --tls-cert-file=\/cert\/tls.crt\n - --tls-private-key-file=\/cert\/tls.key\n - --client-ca-file=\/cert\/ca.crt\n{{- end }}/g' $f - perl -i -0777 -pe 's/(volumes:)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - name: auth-cert\n secret:\n secretName: {{ .Values.prometheus.tlsSecret }}\n{{- end }}/g' $f perl -i -0777 -pe 's/(.*- mountPath: .*\n.*name: auth-cert.*)/\{\{- if not (empty .Values.prometheus.tlsSecret) }}\n - mountPath: \/cert\n name: auth-cert\n{{- end }}/g' $f + perl -i -0777 -pe 's/(.*- name: auth-cert.*\n.*secret:\n.*secretName: custom-cert)/{{- if not \(empty .Values.prometheus.tlsSecret\) }}\n - name: auth-cert\n secret:\n secretName: {{ .Values.prometheus.tlsSecret }}\n{{- end }}/g' $f done # 18. 
Add pod scheduling options From 9412cda95d772b310f046673e1dc6bac6352dfcd Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Tue, 28 Jan 2025 00:00:22 +0000 Subject: [PATCH 10/15] fixed olm env var issue --- .gitignore | 1 + config/manager/kustomization.yaml | 2 +- config/manager/operator-envs | 36 +++++++++++++++---------------- scripts/config-transformer.sh | 3 +++ scripts/gen-csv.sh | 3 +++ 5 files changed, 26 insertions(+), 19 deletions(-) diff --git a/.gitignore b/.gitignore index 945e13063..5be004f52 100644 --- a/.gitignore +++ b/.gitignore @@ -56,6 +56,7 @@ testbin/* # Omit some fully generated files config/crd/bases/*.yaml +config/manager/operator-envs-with-value config/rbac/role.yaml api/v1beta1/zz_generated.deepcopy.go diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 422bbe903..f1db65e47 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,7 +6,7 @@ generatorOptions: configMapGenerator: - envs: - - operator-envs + - operator-envs-with-value literals: - PREFIX_NAME=verticadb-operator - WEBHOOK_CERT_SECRET=verticadb-operator-service-cert diff --git a/config/manager/operator-envs b/config/manager/operator-envs index dccb580ab..df6c60ce4 100644 --- a/config/manager/operator-envs +++ b/config/manager/operator-envs @@ -1,20 +1,20 @@ # These are environment variables that are included in the operator config map. # These are exported in the Makefile. -DEPLOY_WITH -VERSION -WEBHOOKS_ENABLED -CONTROLLERS_ENABLED -CONTROLLERS_SCOPE -METRICS_ADDR -METRICS_TLS_SECRET -METRICS_PROXY_RBAC -LOG_LEVEL -CONCURRENCY_VERTICADB -CONCURRENCY_VERTICAAUTOSCALER -CONCURRENCY_EVENTTRIGGER -CONCURRENCY_VERTICARESTOREPOINTSQUERY -CONCURRENCY_VERTICASCRUTINIZE -CONCURRENCY_SANDBOXCONFIGMAP -CONCURRENCY_VERTICAREPLICATOR -BROADCASTER_BURST_SIZE -VDB_MAX_BACKOFF_DURATION +DEPLOY_WITH=${DEPLOY_WITH} +VERSION=${VERSION} +WEBHOOKS_ENABLED=${WEBHOOKS_ENABLED} +CONTROLLERS_ENABLED=${CONTROLLERS_ENABLED} +CONTROLLERS_SCOPE=${CONTROLLERS_SCOPE} +METRICS_ADDR=${METRICS_ADDR} +METRICS_TLS_SECRET=${METRICS_TLS_SECRET} +METRICS_PROXY_RBAC=${METRICS_PROXY_RBAC} +LOG_LEVEL=${LOG_LEVEL} +CONCURRENCY_VERTICADB=${CONCURRENCY_VERTICADB} +CONCURRENCY_VERTICAAUTOSCALER=${CONCURRENCY_VERTICAAUTOSCALER} +CONCURRENCY_EVENTTRIGGER=${CONCURRENCY_EVENTTRIGGER} +CONCURRENCY_VERTICARESTOREPOINTSQUERY=${CONCURRENCY_VERTICARESTOREPOINTSQUERY} +CONCURRENCY_VERTICASCRUTINIZE=${CONCURRENCY_VERTICASCRUTINIZE} +CONCURRENCY_SANDBOXCONFIGMAP=${CONCURRENCY_SANDBOXCONFIGMAP} +CONCURRENCY_VERTICAREPLICATOR=${CONCURRENCY_VERTICAREPLICATOR} +BROADCASTER_BURST_SIZE=${BROADCASTER_BURST_SIZE} +VDB_MAX_BACKOFF_DURATION=${VDB_MAX_BACKOFF_DURATION} diff --git a/scripts/config-transformer.sh b/scripts/config-transformer.sh index 3faf14ff4..07413233a 100755 --- a/scripts/config-transformer.sh +++ b/scripts/config-transformer.sh @@ -27,6 +27,9 @@ OPERATOR_CHART="$REPO_DIR/helm-charts/verticadb-operator" TEMPLATE_DIR=$OPERATOR_CHART/templates CRD_DIR=$OPERATOR_CHART/crds +# Fill in operator variables +envsubst < $REPO_DIR/config/manager/operator-envs > $REPO_DIR/config/manager/operator-envs-with-value + rm $TEMPLATE_DIR/*yaml 2>/dev/null || true $KUSTOMIZE build $REPO_DIR/config/default | $KUBERNETES_SPLIT_YAML --outdir $TEMPLATE_DIR - mv $TEMPLATE_DIR/*-crd.yaml $CRD_DIR diff --git a/scripts/gen-csv.sh b/scripts/gen-csv.sh index ece350bd6..cdfae2f14 100755 --- a/scripts/gen-csv.sh +++ b/scripts/gen-csv.sh @@ -67,6 +67,9 @@ shift BUNDLE_METADATA_OPTS=$@ BUNDLE_GEN_FLAGS="-q --overwrite 
--version $VERSION $BUNDLE_METADATA_OPTS $USE_IMAGE_DIGESTS_FLAG" +# Fill in operator variables +envsubst < $REPO_DIR/config/manager/operator-envs > $REPO_DIR/config/manager/operator-envs-with-value + cd $REPO_DIR rm -rf bundle/ 2>/dev/null || true $OPERATOR_SDK generate kustomize manifests -q From 4676133a8f75fa7602df26d35b281cd43279bff8 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Tue, 28 Jan 2025 18:41:16 +0000 Subject: [PATCH 11/15] fixed multiple operator issue --- config/rbac/metrics_auth_role_binding.yaml | 2 +- scripts/authorize-metrics.sh | 23 ---------------------- scripts/template-helm-chart.sh | 9 ++++++++- 3 files changed, 9 insertions(+), 25 deletions(-) diff --git a/config/rbac/metrics_auth_role_binding.yaml b/config/rbac/metrics_auth_role_binding.yaml index c7063d7b3..72d047f72 100644 --- a/config/rbac/metrics_auth_role_binding.yaml +++ b/config/rbac/metrics_auth_role_binding.yaml @@ -8,5 +8,5 @@ roleRef: name: metrics-auth-role subjects: - kind: ServiceAccount - name: controller-manager + name: manager namespace: system \ No newline at end of file diff --git a/scripts/authorize-metrics.sh b/scripts/authorize-metrics.sh index de71549e5..c85df9782 100755 --- a/scripts/authorize-metrics.sh +++ b/scripts/authorize-metrics.sh @@ -67,31 +67,8 @@ then exit 0 fi -kubectl apply -f $REPO_DIR/config/release-manifests/verticadb-operator-proxy-role-cr.yaml kubectl apply -f $REPO_DIR/config/release-manifests/verticadb-operator-metrics-reader-cr.yaml -set +o errexit -kubectl create clusterrolebinding verticadb-operator-proxy-rolebinding --clusterrole=verticadb-operator-proxy-role --serviceaccount=$OP_NAMESPACE:$OP_SA -RES=$? -set -o errexit - -# Append to ClusterRoleBinding if it already exists -if [ $RES -ne "0" ] -then - tmpfile=$(mktemp /tmp/patch-XXXXXX.yaml) - cat <<- EOF > $tmpfile - [{"op": "add", - "path": "/subjects/-", - "value": - {"kind": "ServiceAccount", - "name": "$OP_SA", - "namespace": "$OP_NAMESPACE"} - }] -EOF - kubectl patch clusterrolebinding verticadb-operator-proxy-rolebinding --type='json' --patch-file $tmpfile - rm $tmpfile -fi - set +o errexit kubectl create clusterrolebinding verticadb-operator-metrics-reader --clusterrole=verticadb-operator-metrics-reader --serviceaccount=$ACCESS_NAMESPACE:$ACCESS_SA RES=$? diff --git a/scripts/template-helm-chart.sh b/scripts/template-helm-chart.sh index 137c574b3..ac20ccdf9 100755 --- a/scripts/template-helm-chart.sh +++ b/scripts/template-helm-chart.sh @@ -93,7 +93,8 @@ EOF for f in \ verticadb-operator-leader-election-rolebinding-rb.yaml \ verticadb-operator-manager-clusterrolebinding-crb.yaml \ - verticadb-operator-webhook-config-crb.yaml + verticadb-operator-webhook-config-crb.yaml \ + verticadb-operator-metrics-auth-rolebinding-crb.yaml do perl -i -0777 -pe 's/kind: ServiceAccount\n.*name: .*/kind: ServiceAccount\n name: {{ include "vdb-op.serviceAccount" . }}/g' $TEMPLATE_DIR/$f done @@ -215,6 +216,12 @@ do perl -i -pe 's/^/{{- if .Values.controllers.enable -}}\n/ if 1 .. 1' $f echo "{{- end }}" >> $f done +for f in $TEMPLATE_DIR/verticadb-operator-metrics-auth-role-cr.yaml \ + $TEMPLATE_DIR/verticadb-operator-metrics-auth-rolebinding-crb.yaml +do + perl -i -0777 -pe 's/kind: ClusterRoleBinding/kind: {{ include "vdb-op.roleBindingKind" . }}/g' $f + perl -i -0777 -pe 's/kind: ClusterRole/kind: {{ include "vdb-op.roleKind" . }}/g' $f +done # 22. 
Template the operator config for fn in $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml From 8f40158333f454d632b5d8b0ce1cb2790f2461a9 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Tue, 28 Jan 2025 19:32:09 +0000 Subject: [PATCH 12/15] fixed tests with vlogger --- scripts/setup-kustomize.sh | 4 ++-- .../multi-online-upgrade/15-assert.yaml | 4 ++-- .../multi-online-upgrade/20-assert.yaml | 4 ++-- .../30-initiate-upgrade.yaml | 2 +- .../multi-online-upgrade/32-assert.yaml | 2 +- .../multi-online-upgrade/35-assert.yaml | 6 +++--- .../multi-online-upgrade/40-assert.yaml | 4 ++-- .../multi-online-upgrade/40-errors.yaml | 2 +- .../45-initiate-upgrade.yaml | 2 +- .../multi-online-upgrade/46-assert.yaml | 2 +- .../multi-online-upgrade/50-assert.yaml | 6 +++--- .../multi-online-upgrade/55-assert.yaml | 4 ++-- .../multi-online-upgrade/55-errors.yaml | 2 +- .../setup-vdb/base/setup-vdb.yaml | 2 +- .../15-assert.yaml | 4 ++-- .../20-assert.yaml | 4 ++-- .../30-initiate-upgrade.yaml | 2 +- .../32-assert.yaml | 2 +- .../35-assert.yaml | 6 +++--- .../36-assert.yaml | 2 +- .../36-kill-pods-in-main.yaml | 2 +- .../40-assert.yaml | 4 ++-- .../40-errors.yaml | 2 +- .../setup-vdb/base/setup-vdb.yaml | 2 +- .../new-online-upgrade-sanity/15-assert.yaml | 8 ++++---- .../new-online-upgrade-sanity/20-assert.yaml | 8 ++++---- .../30-initiate-upgrade.yaml | 2 +- .../new-online-upgrade-sanity/31-assert.yaml | 8 ++++---- .../new-online-upgrade-sanity/32-assert.yaml | 2 +- .../new-online-upgrade-sanity/35-assert.yaml | 18 +++++++++--------- .../36-verify-non-replicatable-queries.yaml | 4 ++-- .../37-create-data-in-main-cluster.yaml | 2 +- .../39-verify-data-in-sandbox.yaml | 2 +- .../new-online-upgrade-sanity/40-assert.yaml | 8 ++++---- .../new-online-upgrade-sanity/40-errors.yaml | 10 +++++----- .../45-verify-new-main-cluster.yaml | 2 +- .../new-online-upgrade-sanity/55-assert.yaml | 2 +- .../55-kill-one-pod.yaml | 2 +- .../new-online-upgrade-sanity/56-assert.yaml | 2 +- .../setup-vdb/base/setup-vdb.yaml | 2 +- .../verify-new-main-cluster-connection.yaml | 4 ++-- .../online-upgrade-pods-pending/15-assert.yaml | 10 +++++----- .../online-upgrade-pods-pending/20-assert.yaml | 10 +++++----- .../online-upgrade-pods-pending/26-assert.yaml | 2 +- .../26-set-pod-pending.yaml | 2 +- .../online-upgrade-pods-pending/27-assert.yaml | 4 ++-- .../27-initiate-upgrade.yaml | 2 +- .../29-set-pod-running.yaml | 2 +- .../online-upgrade-pods-pending/30-assert.yaml | 2 +- .../online-upgrade-pods-pending/31-assert.yaml | 10 +++++----- .../setup-vdb/base/setup-vdb.yaml | 2 +- 51 files changed, 104 insertions(+), 104 deletions(-) diff --git a/scripts/setup-kustomize.sh b/scripts/setup-kustomize.sh index dbeebe217..34a780c34 100755 --- a/scripts/setup-kustomize.sh +++ b/scripts/setup-kustomize.sh @@ -300,7 +300,7 @@ replacements: targets: - select: kind: VerticaDB - name: vertica-sample + name: verticadb-sample fieldPaths: - spec.sidecars.[name=vlogger].image - select: @@ -375,7 +375,7 @@ replacements: - spec.sidecars.[name=vlogger].image - select: kind: VerticaDB - name: v-base-upgrade + name: v-online-upgrade fieldPaths: - spec.sidecars.[name=vlogger].image EOF diff --git a/tests/e2e-leg-9/multi-online-upgrade/15-assert.yaml b/tests/e2e-leg-9/multi-online-upgrade/15-assert.yaml index 8c50b78e7..5fdfd8646 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/15-assert.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/15-assert.yaml @@ -14,11 +14,11 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main + name: 
v-online-upgrade-main status: currentReplicas: 3 --- apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade diff --git a/tests/e2e-leg-9/multi-online-upgrade/20-assert.yaml b/tests/e2e-leg-9/multi-online-upgrade/20-assert.yaml index dfcecc028..4ceb82186 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/20-assert.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/20-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main + name: v-online-upgrade-main status: currentReplicas: 3 readyReplicas: 3 @@ -22,7 +22,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade status: subclusters: - addedToDBCount: 3 diff --git a/tests/e2e-leg-9/multi-online-upgrade/30-initiate-upgrade.yaml b/tests/e2e-leg-9/multi-online-upgrade/30-initiate-upgrade.yaml index f42fc7b9a..891ae633f 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/30-initiate-upgrade.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/30-initiate-upgrade.yaml @@ -14,4 +14,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-base-upgrade opentext/vertica-k8s-private:20240929-minimal + - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-online-upgrade opentext/vertica-k8s-private:20240929-minimal diff --git a/tests/e2e-leg-9/multi-online-upgrade/32-assert.yaml b/tests/e2e-leg-9/multi-online-upgrade/32-assert.yaml index 5ce597a8a..0006c1236 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/32-assert.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/32-assert.yaml @@ -19,4 +19,4 @@ source: involvedObject: apiVersion: vertica.com/v1 kind: VerticaDB - name: v-base-upgrade \ No newline at end of file + name: v-online-upgrade \ No newline at end of file diff --git a/tests/e2e-leg-9/multi-online-upgrade/35-assert.yaml b/tests/e2e-leg-9/multi-online-upgrade/35-assert.yaml index 41a882d01..68cb8e02a 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/35-assert.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/35-assert.yaml @@ -14,9 +14,9 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main-sb + name: v-online-upgrade-main-sb annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-main-sb" + vertica.com/statefulset-name-override: "v-online-upgrade-main-sb" status: currentReplicas: 3 readyReplicas: 3 @@ -24,7 +24,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: main diff --git a/tests/e2e-leg-9/multi-online-upgrade/40-assert.yaml b/tests/e2e-leg-9/multi-online-upgrade/40-assert.yaml index 34d8ccaec..36c15defd 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/40-assert.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/40-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main-sb + name: v-online-upgrade-main-sb status: currentReplicas: 3 readyReplicas: 3 @@ -22,7 +22,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: main diff --git a/tests/e2e-leg-9/multi-online-upgrade/40-errors.yaml b/tests/e2e-leg-9/multi-online-upgrade/40-errors.yaml index 30a961a09..6bd00300c 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/40-errors.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/40-errors.yaml @@ -14,4 +14,4 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: 
v-base-upgrade-main \ No newline at end of file + name: v-online-upgrade-main \ No newline at end of file diff --git a/tests/e2e-leg-9/multi-online-upgrade/45-initiate-upgrade.yaml b/tests/e2e-leg-9/multi-online-upgrade/45-initiate-upgrade.yaml index 2b0413793..f1a6d2eb4 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/45-initiate-upgrade.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/45-initiate-upgrade.yaml @@ -14,4 +14,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-base-upgrade + - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-online-upgrade diff --git a/tests/e2e-leg-9/multi-online-upgrade/46-assert.yaml b/tests/e2e-leg-9/multi-online-upgrade/46-assert.yaml index 5ce597a8a..0006c1236 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/46-assert.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/46-assert.yaml @@ -19,4 +19,4 @@ source: involvedObject: apiVersion: vertica.com/v1 kind: VerticaDB - name: v-base-upgrade \ No newline at end of file + name: v-online-upgrade \ No newline at end of file diff --git a/tests/e2e-leg-9/multi-online-upgrade/50-assert.yaml b/tests/e2e-leg-9/multi-online-upgrade/50-assert.yaml index 4c4d478a8..5bb25b8b8 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/50-assert.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/50-assert.yaml @@ -14,9 +14,9 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main + name: v-online-upgrade-main annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-main" + vertica.com/statefulset-name-override: "v-online-upgrade-main" status: currentReplicas: 3 readyReplicas: 3 @@ -24,7 +24,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: main diff --git a/tests/e2e-leg-9/multi-online-upgrade/55-assert.yaml b/tests/e2e-leg-9/multi-online-upgrade/55-assert.yaml index 29370ab63..bbff18200 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/55-assert.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/55-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main + name: v-online-upgrade-main status: currentReplicas: 3 readyReplicas: 3 @@ -22,7 +22,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: main diff --git a/tests/e2e-leg-9/multi-online-upgrade/55-errors.yaml b/tests/e2e-leg-9/multi-online-upgrade/55-errors.yaml index 51bf60e16..028e4a004 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/55-errors.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/55-errors.yaml @@ -14,4 +14,4 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main-sb \ No newline at end of file + name: v-online-upgrade-main-sb \ No newline at end of file diff --git a/tests/e2e-leg-9/multi-online-upgrade/setup-vdb/base/setup-vdb.yaml b/tests/e2e-leg-9/multi-online-upgrade/setup-vdb/base/setup-vdb.yaml index 3cd02b1cc..064cc8098 100644 --- a/tests/e2e-leg-9/multi-online-upgrade/setup-vdb/base/setup-vdb.yaml +++ b/tests/e2e-leg-9/multi-online-upgrade/setup-vdb/base/setup-vdb.yaml @@ -14,7 +14,7 @@ apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade annotations: vertica.com/include-uid-in-path: true vertica.com/save-restore-point-on-upgrade: true diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/15-assert.yaml 
b/tests/e2e-leg-9/new-online-upgrade-pods-down/15-assert.yaml index 8c50b78e7..5fdfd8646 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/15-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/15-assert.yaml @@ -14,11 +14,11 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main + name: v-online-upgrade-main status: currentReplicas: 3 --- apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/20-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/20-assert.yaml index dfcecc028..4ceb82186 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/20-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/20-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main + name: v-online-upgrade-main status: currentReplicas: 3 readyReplicas: 3 @@ -22,7 +22,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade status: subclusters: - addedToDBCount: 3 diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/30-initiate-upgrade.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/30-initiate-upgrade.yaml index 66ab83175..098fa0187 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/30-initiate-upgrade.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/30-initiate-upgrade.yaml @@ -14,4 +14,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-base-upgrade \ No newline at end of file + - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-online-upgrade \ No newline at end of file diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/32-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/32-assert.yaml index 5ce597a8a..0006c1236 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/32-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/32-assert.yaml @@ -19,4 +19,4 @@ source: involvedObject: apiVersion: vertica.com/v1 kind: VerticaDB - name: v-base-upgrade \ No newline at end of file + name: v-online-upgrade \ No newline at end of file diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/35-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/35-assert.yaml index 41a882d01..68cb8e02a 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/35-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/35-assert.yaml @@ -14,9 +14,9 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main-sb + name: v-online-upgrade-main-sb annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-main-sb" + vertica.com/statefulset-name-override: "v-online-upgrade-main-sb" status: currentReplicas: 3 readyReplicas: 3 @@ -24,7 +24,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: main diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/36-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/36-assert.yaml index ddb9d5011..7d38f10f8 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/36-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/36-assert.yaml @@ -14,6 +14,6 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main + name: v-online-upgrade-main status: currentReplicas: 3 \ No newline at end of file diff --git 
a/tests/e2e-leg-9/new-online-upgrade-pods-down/36-kill-pods-in-main.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/36-kill-pods-in-main.yaml index 7a0e703a5..90cdbd654 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/36-kill-pods-in-main.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/36-kill-pods-in-main.yaml @@ -14,5 +14,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: kubectl delete pod v-base-upgrade-main-0 v-base-upgrade-main-1 v-base-upgrade-main-2 + - command: kubectl delete pod v-online-upgrade-main-0 v-online-upgrade-main-1 v-online-upgrade-main-2 namespaced: true diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/40-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/40-assert.yaml index 34d8ccaec..36c15defd 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/40-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/40-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main-sb + name: v-online-upgrade-main-sb status: currentReplicas: 3 readyReplicas: 3 @@ -22,7 +22,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: main diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/40-errors.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/40-errors.yaml index 30a961a09..6bd00300c 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/40-errors.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/40-errors.yaml @@ -14,4 +14,4 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-main \ No newline at end of file + name: v-online-upgrade-main \ No newline at end of file diff --git a/tests/e2e-leg-9/new-online-upgrade-pods-down/setup-vdb/base/setup-vdb.yaml b/tests/e2e-leg-9/new-online-upgrade-pods-down/setup-vdb/base/setup-vdb.yaml index 188780bc8..ee144dc24 100644 --- a/tests/e2e-leg-9/new-online-upgrade-pods-down/setup-vdb/base/setup-vdb.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-pods-down/setup-vdb/base/setup-vdb.yaml @@ -14,7 +14,7 @@ apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade annotations: vertica.com/include-uid-in-path: true vertica.com/save-restore-point-on-upgrade: false diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/15-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/15-assert.yaml index 34f31b0f5..29787ca8c 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/15-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/15-assert.yaml @@ -14,25 +14,25 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1 + name: v-online-upgrade-pri1 status: currentReplicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2 + name: v-online-upgrade-pri-2 status: currentReplicas: 1 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1 + name: v-online-upgrade-sec1 status: currentReplicas: 3 --- apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/20-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/20-assert.yaml index 9910619eb..7e3808b3f 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/20-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/20-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1 + name: 
v-online-upgrade-pri1 status: currentReplicas: 2 readyReplicas: 2 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2 + name: v-online-upgrade-pri-2 status: currentReplicas: 1 readyReplicas: 1 @@ -30,7 +30,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1 + name: v-online-upgrade-sec1 status: currentReplicas: 3 readyReplicas: 3 @@ -38,7 +38,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade status: subclusters: - addedToDBCount: 3 diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/30-initiate-upgrade.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/30-initiate-upgrade.yaml index 2b0413793..f1a6d2eb4 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/30-initiate-upgrade.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/30-initiate-upgrade.yaml @@ -14,4 +14,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-base-upgrade + - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-online-upgrade diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/31-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/31-assert.yaml index 9dc7e6321..3052a8e3c 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/31-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/31-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1-sb + name: v-online-upgrade-pri1-sb status: currentReplicas: 2 readyReplicas: 2 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2-sb + name: v-online-upgrade-pri-2-sb status: currentReplicas: 1 readyReplicas: 1 @@ -30,7 +30,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-online-upgrade-sec1-sb status: currentReplicas: 3 readyReplicas: 3 @@ -38,7 +38,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade status: subclusters: - addedToDBCount: 3 diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/32-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/32-assert.yaml index 5ce597a8a..0006c1236 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/32-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/32-assert.yaml @@ -19,4 +19,4 @@ source: involvedObject: apiVersion: vertica.com/v1 kind: VerticaDB - name: v-base-upgrade \ No newline at end of file + name: v-online-upgrade \ No newline at end of file diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/35-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/35-assert.yaml index 8d1d1123f..396f1a52a 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/35-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/35-assert.yaml @@ -14,9 +14,9 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1-sb + name: v-online-upgrade-pri1-sb annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-pri1-sb" + vertica.com/statefulset-name-override: "v-online-upgrade-pri1-sb" labels: vertica.com/sandbox: replica-group-b status: @@ -26,9 +26,9 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2-sb + name: v-online-upgrade-pri-2-sb annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-pri-2-sb" + vertica.com/statefulset-name-override: "v-online-upgrade-pri-2-sb" labels: 
vertica.com/sandbox: replica-group-b status: @@ -38,9 +38,9 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-online-upgrade-sec1-sb annotations: - vertica.com/statefulset-name-override: "v-base-upgrade-sec1-sb" + vertica.com/statefulset-name-override: "v-online-upgrade-sec1-sb" labels: vertica.com/sandbox: replica-group-b status: @@ -50,16 +50,16 @@ status: apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-replica-group-b + name: v-online-upgrade-replica-group-b data: sandboxName: replica-group-b - verticaDBName: v-base-upgrade + verticaDBName: v-online-upgrade immutable: true --- apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: sec1 diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/36-verify-non-replicatable-queries.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/36-verify-non-replicatable-queries.yaml index 4f7f761ca..8c5171e87 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/36-verify-non-replicatable-queries.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/36-verify-non-replicatable-queries.yaml @@ -15,12 +15,12 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - script: | - val=$(kubectl exec -n $NAMESPACE v-base-upgrade-pri1-0 -c server -- vsql -tAc "alter database default set DataSSLMaxBufSize = 32768"); \ + val=$(kubectl exec -n $NAMESPACE v-online-upgrade-pri1-0 -c server -- vsql -tAc "alter database default set DataSSLMaxBufSize = 32768"); \ if [ $val == 0 ]; then \ exit 1; \ fi - script: | - val=$(kubectl exec -n $NAMESPACE v-base-upgrade-pri1-sb-0 -c server -- vsql -tAc "alter database default set DataSSLMaxBufSize = 32768"); \ + val=$(kubectl exec -n $NAMESPACE v-online-upgrade-pri1-sb-0 -c server -- vsql -tAc "alter database default set DataSSLMaxBufSize = 32768"); \ if [ $val == 1 ]; then \ exit 1; \ fi \ No newline at end of file diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/37-create-data-in-main-cluster.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/37-create-data-in-main-cluster.yaml index 86abf1b0b..880ad4fde 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/37-create-data-in-main-cluster.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/37-create-data-in-main-cluster.yaml @@ -21,7 +21,7 @@ data: set -o errexit set -o xtrace - POD_NAME=v-base-upgrade-pri1-0 + POD_NAME=v-online-upgrade-pri1-0 kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U dbadmin -w superuser -tAc \"CREATE TABLE public.test_table (val INTEGER);\"" kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U dbadmin -w superuser -tAc \"INSERT INTO public.test_table VALUES (99); COMMIT;\"" diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/39-verify-data-in-sandbox.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/39-verify-data-in-sandbox.yaml index e4d7c5567..6f6f4799f 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/39-verify-data-in-sandbox.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/39-verify-data-in-sandbox.yaml @@ -21,7 +21,7 @@ data: set -o errexit set -o xtrace - POD_NAME=v-base-upgrade-pri1-sb-0 + POD_NAME=v-online-upgrade-pri1-sb-0 result=$(kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U dbadmin -tAc \"SELECT * FROM public.test_table ORDER BY val;\"") echo "$result" | grep -Pzo "^99\n$" > /dev/null diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/40-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/40-assert.yaml index e20c63afe..78a98d8c7 100644 --- 
a/tests/e2e-leg-9/new-online-upgrade-sanity/40-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/40-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1-sb + name: v-online-upgrade-pri1-sb status: currentReplicas: 2 readyReplicas: 2 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2-sb + name: v-online-upgrade-pri-2-sb status: currentReplicas: 1 readyReplicas: 1 @@ -30,7 +30,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-online-upgrade-sec1-sb status: currentReplicas: 3 readyReplicas: 3 @@ -38,7 +38,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: pri1 diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/40-errors.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/40-errors.yaml index d985e9c57..9a5aa7d1d 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/40-errors.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/40-errors.yaml @@ -14,23 +14,23 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1 + name: v-online-upgrade-pri1 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2 + name: v-online-upgrade-pri-2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1 + name: v-online-upgrade-sec1 --- apiVersion: v1 kind: ConfigMap metadata: - name: v-base-upgrade-replica-group-b + name: v-online-upgrade-replica-group-b data: sandboxName: replica-group-b - verticaDBName: v-base-upgrade + verticaDBName: v-online-upgrade immutable: true diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/45-verify-new-main-cluster.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/45-verify-new-main-cluster.yaml index bac5dc29d..45b8590c4 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/45-verify-new-main-cluster.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/45-verify-new-main-cluster.yaml @@ -21,7 +21,7 @@ data: set -o errexit set -o xtrace - POD_NAMES=("v-base-upgrade-pri1-sb-0" "v-base-upgrade-pri-2-sb-0" "v-base-upgrade-sec1-sb-0") + POD_NAMES=("v-online-upgrade-pri1-sb-0" "v-online-upgrade-pri-2-sb-0" "v-online-upgrade-sec1-sb-0") for POD_NAME in "${POD_NAMES[@]}"; do result=$(kubectl exec $POD_NAME -i -c server -- bash -c "vsql -U dbadmin -tAc \"SELECT COUNT(*) FROM nodes WHERE node_state = 'UP' and subcluster_name not like '%sb' and sandbox = '';\"") diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/55-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/55-assert.yaml index 71dd5a0c2..d3e541b96 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/55-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/55-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-online-upgrade-sec1-sb status: currentReplicas: 3 readyReplicas: 2 diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/55-kill-one-pod.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/55-kill-one-pod.yaml index 9a93fc9be..872b2c57d 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/55-kill-one-pod.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/55-kill-one-pod.yaml @@ -14,5 +14,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: kubectl delete pod v-base-upgrade-sec1-sb-0 + - command: kubectl delete pod v-online-upgrade-sec1-sb-0 namespaced: true diff --git 
a/tests/e2e-leg-9/new-online-upgrade-sanity/56-assert.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/56-assert.yaml index cf86ae9b8..452a78819 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/56-assert.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/56-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-online-upgrade-sec1-sb status: currentReplicas: 3 readyReplicas: 3 diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/setup-vdb/base/setup-vdb.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/setup-vdb/base/setup-vdb.yaml index 6377f7cdb..e57d22a90 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/setup-vdb/base/setup-vdb.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/setup-vdb/base/setup-vdb.yaml @@ -14,7 +14,7 @@ apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade annotations: vertica.com/include-uid-in-path: true vertica.com/online-upgrade-preferred-sandbox: "replica-group-b" diff --git a/tests/e2e-leg-9/new-online-upgrade-sanity/verify-new-main-cluster-connection/base/verify-new-main-cluster-connection.yaml b/tests/e2e-leg-9/new-online-upgrade-sanity/verify-new-main-cluster-connection/base/verify-new-main-cluster-connection.yaml index c78f4c156..b918b46d7 100644 --- a/tests/e2e-leg-9/new-online-upgrade-sanity/verify-new-main-cluster-connection/base/verify-new-main-cluster-connection.yaml +++ b/tests/e2e-leg-9/new-online-upgrade-sanity/verify-new-main-cluster-connection/base/verify-new-main-cluster-connection.yaml @@ -21,7 +21,7 @@ data: set -o errexit set -o xtrace # Access to subclusters through primary service should route to the new main cluster. - CONNECTION_NODE=$(vsql -U dbadmin -h v-base-upgrade-pri1 -tAc "select node_name from current_session") + CONNECTION_NODE=$(vsql -U dbadmin -h v-online-upgrade-pri1 -tAc "select node_name from current_session") echo $CONNECTION_NODE if [[ $CONNECTION_NODE == "v_repup_node0007" ]] || \ [[ $CONNECTION_NODE == "v_repup_node0008" ]] || \ @@ -30,7 +30,7 @@ data: exit 0 fi # Access to subclusters through secondary service should route to the new main cluster. 
- CONNECTION_NODE=$(vsql -U dbadmin -h v-base-upgrade-sec1 -tAc "select node_name from current_session") + CONNECTION_NODE=$(vsql -U dbadmin -h v-online-upgrade-sec1 -tAc "select node_name from current_session") echo $CONNECTION_NODE if [[ $CONNECTION_NODE == "v_repup_node0010" ]] || \ [[ $CONNECTION_NODE == "v_repup_node0011" ]] || \ diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/15-assert.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/15-assert.yaml index c607e4663..1d9725caf 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/15-assert.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/15-assert.yaml @@ -14,32 +14,32 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1 + name: v-online-upgrade-pri1 status: currentReplicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2 + name: v-online-upgrade-pri-2 status: currentReplicas: 1 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1 + name: v-online-upgrade-sec1 status: currentReplicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec2 + name: v-online-upgrade-sec2 status: currentReplicas: 1 --- apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/20-assert.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/20-assert.yaml index e86364725..cc54d2688 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/20-assert.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/20-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1 + name: v-online-upgrade-pri1 status: currentReplicas: 2 readyReplicas: 2 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2 + name: v-online-upgrade-pri-2 status: currentReplicas: 1 readyReplicas: 1 @@ -30,14 +30,14 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1 + name: v-online-upgrade-sec1 status: currentReplicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec2 + name: v-online-upgrade-sec2 status: currentReplicas: 1 readyReplicas: 1 @@ -45,7 +45,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade status: subclusters: - addedToDBCount: 2 diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/26-assert.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/26-assert.yaml index 5e3499916..da593e2bf 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/26-assert.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/26-assert.yaml @@ -14,6 +14,6 @@ apiVersion: v1 kind: Pod metadata: - name: v-base-upgrade-sec2-0 + name: v-online-upgrade-sec2-0 status: phase: Pending diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/26-set-pod-pending.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/26-set-pod-pending.yaml index 4d3afe966..b74a978e1 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/26-set-pod-pending.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/26-set-pod-pending.yaml @@ -14,7 +14,7 @@ apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: sec1 diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/27-assert.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/27-assert.yaml index ccb58f602..5bb73fda5 100644 --- 
a/tests/e2e-leg-9/online-upgrade-pods-pending/27-assert.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/27-assert.yaml @@ -14,13 +14,13 @@ apiVersion: v1 kind: Pod metadata: - name: v-base-upgrade-sec2-0 + name: v-online-upgrade-sec2-0 status: phase: Pending --- apiVersion: vertica.com/v1beta1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade status: upgradeStatus: "Requeue as not all pods are running" \ No newline at end of file diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/27-initiate-upgrade.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/27-initiate-upgrade.yaml index 2b0413793..f1a6d2eb4 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/27-initiate-upgrade.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/27-initiate-upgrade.yaml @@ -14,4 +14,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-base-upgrade + - command: ../../../scripts/patch-image-in-vdb.sh -n $NAMESPACE v-online-upgrade diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/29-set-pod-running.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/29-set-pod-running.yaml index 084266b38..6e9c58240 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/29-set-pod-running.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/29-set-pod-running.yaml @@ -14,7 +14,7 @@ apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade spec: subclusters: - name: sec1 diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/30-assert.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/30-assert.yaml index 18a1717c3..2e34a2bbf 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/30-assert.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/30-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec2 + name: v-online-upgrade-sec2 status: currentReplicas: 1 readyReplicas: 1 diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/31-assert.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/31-assert.yaml index 420750eb4..12cee0ab5 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/31-assert.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/31-assert.yaml @@ -14,7 +14,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri1-sb + name: v-online-upgrade-pri1-sb status: currentReplicas: 2 readyReplicas: 2 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-pri-2-sb + name: v-online-upgrade-pri-2-sb status: currentReplicas: 1 readyReplicas: 1 @@ -30,7 +30,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec1-sb + name: v-online-upgrade-sec1-sb status: currentReplicas: 2 readyReplicas: 2 @@ -38,7 +38,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: v-base-upgrade-sec2-sb + name: v-online-upgrade-sec2-sb status: currentReplicas: 1 readyReplicas: 1 @@ -46,7 +46,7 @@ status: apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade status: subclusters: - addedToDBCount: 2 diff --git a/tests/e2e-leg-9/online-upgrade-pods-pending/setup-vdb/base/setup-vdb.yaml b/tests/e2e-leg-9/online-upgrade-pods-pending/setup-vdb/base/setup-vdb.yaml index 0551d867b..d37c1350b 100644 --- a/tests/e2e-leg-9/online-upgrade-pods-pending/setup-vdb/base/setup-vdb.yaml +++ b/tests/e2e-leg-9/online-upgrade-pods-pending/setup-vdb/base/setup-vdb.yaml @@ -14,7 +14,7 @@ 
apiVersion: vertica.com/v1 kind: VerticaDB metadata: - name: v-base-upgrade + name: v-online-upgrade annotations: vertica.com/include-uid-in-path: true vertica.com/online-upgrade-preferred-sandbox: "replica-group-b" From 1839fc4033d45e3bad015950ebdfb95f472a05a5 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Tue, 28 Jan 2025 22:25:31 +0000 Subject: [PATCH 13/15] fixed kustomize issue --- config/default/manager_metrics_patch.yaml | 1 + scripts/setup-kustomize.sh | 10 ++++++++++ scripts/template-helm-chart.sh | 2 +- tests/e2e-leg-5/metrics-disabled/05-assert.yaml | 2 +- .../add-cert-to-vdb/base/add-cert-to-vdb.yaml | 1 + 5 files changed, 14 insertions(+), 2 deletions(-) diff --git a/config/default/manager_metrics_patch.yaml b/config/default/manager_metrics_patch.yaml index 77f0d50f6..3004d8975 100644 --- a/config/default/manager_metrics_patch.yaml +++ b/config/default/manager_metrics_patch.yaml @@ -12,4 +12,5 @@ value: name: auth-cert secret: + defaultMode: 420 secretName: custom-cert \ No newline at end of file diff --git a/scripts/setup-kustomize.sh b/scripts/setup-kustomize.sh index 34a780c34..318a75856 100755 --- a/scripts/setup-kustomize.sh +++ b/scripts/setup-kustomize.sh @@ -260,6 +260,16 @@ replacements: name: v-fallback fieldPaths: - spec.image + - select: + kind: VerticaDB + name: v-online-upgrade + fieldPaths: + - spec.image + - select: + kind: VerticaDB + name: v-client-proxy-upgrade + fieldPaths: + - spec.image - source: kind: ConfigMap name: e2e diff --git a/scripts/template-helm-chart.sh b/scripts/template-helm-chart.sh index ac20ccdf9..b8b109f6b 100755 --- a/scripts/template-helm-chart.sh +++ b/scripts/template-helm-chart.sh @@ -155,7 +155,7 @@ for f in $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml do perl -i -0777 -pe 's/(.*--v=[0-9]+)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - --tls-cert-file=\/cert\/tls.crt\n - --tls-private-key-file=\/cert\/tls.key\n - --client-ca-file=\/cert\/ca.crt\n{{- end }}/g' $f perl -i -0777 -pe 's/(.*- mountPath: .*\n.*name: auth-cert.*)/\{\{- if not (empty .Values.prometheus.tlsSecret) }}\n - mountPath: \/cert\n name: auth-cert\n{{- end }}/g' $f - perl -i -0777 -pe 's/(.*- name: auth-cert.*\n.*secret:\n.*secretName: custom-cert)/{{- if not \(empty .Values.prometheus.tlsSecret\) }}\n - name: auth-cert\n secret:\n secretName: {{ .Values.prometheus.tlsSecret }}\n{{- end }}/g' $f + perl -i -0777 -pe 's/(.*- name: auth-cert.*\n.*secret:.*\n.*defaultMode: 420.*\n.*secretName: custom-cert)/{{- if not \(empty .Values.prometheus.tlsSecret\) }}\n - name: auth-cert\n secret:\n defaultMode: 420\n secretName: {{ .Values.prometheus.tlsSecret }}\n{{- end }}/g' $f done # 18. 
Add pod scheduling options diff --git a/tests/e2e-leg-5/metrics-disabled/05-assert.yaml b/tests/e2e-leg-5/metrics-disabled/05-assert.yaml index dc09521d0..6c7b37a79 100644 --- a/tests/e2e-leg-5/metrics-disabled/05-assert.yaml +++ b/tests/e2e-leg-5/metrics-disabled/05-assert.yaml @@ -18,7 +18,7 @@ metadata: control-plane: verticadb-operator data: WEBHOOK_CERT_SOURCE: cert-manager - METRICS_ADDR: "" + METRICS_ADDR: "0" --- apiVersion: v1 kind: Service diff --git a/tests/e2e-leg-7/mount-certs/add-cert-to-vdb/base/add-cert-to-vdb.yaml b/tests/e2e-leg-7/mount-certs/add-cert-to-vdb/base/add-cert-to-vdb.yaml index cb6b1cc8c..906b4cc38 100644 --- a/tests/e2e-leg-7/mount-certs/add-cert-to-vdb/base/add-cert-to-vdb.yaml +++ b/tests/e2e-leg-7/mount-certs/add-cert-to-vdb/base/add-cert-to-vdb.yaml @@ -16,6 +16,7 @@ kind: VerticaDB metadata: name: v-mount-certs spec: + image: kustomize-vertica-image communal: {} certSecrets: - name: my-cert-1 From 85ee26588744e15c7fe547bc80692da69c43e923 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Fri, 31 Jan 2025 22:33:35 +0000 Subject: [PATCH 14/15] fixed the tests --- DEVELOPER.md | 2 +- Makefile | 28 +++++++++++++++- changes/unreleased/Added-20250131-221406.yaml | 6 ++++ .../unreleased/Removed-20250131-221220.yaml | 5 +++ cmd/operator/main.go | 32 ++++++++++++++++--- config/default/kustomization.yaml | 2 +- config/manager/operator-envs | 2 +- config/rbac/kustomization.yaml | 1 + config/rbac/metrics_reader_role_binding.yaml | 16 ++++++++++ helm-charts/verticadb-operator/README.md | 5 ++- .../tests/image-name-and-tag_test.yaml | 2 +- .../tests/metrics-configmap_test.yaml | 4 +-- .../serviceaccount-rolebinding_test.yaml | 4 ++- .../tests/servicemonitor_test.yaml | 22 ------------- helm-charts/verticadb-operator/values.yaml | 20 ++++++------ pkg/opcfg/config.go | 5 +++ scripts/authorize-metrics.sh | 1 + scripts/gen-csv.sh | 4 +-- scripts/gen-release-artifacts.sh | 2 +- scripts/template-helm-chart.sh | 24 +++++++------- .../00-create-creds.yaml | 0 .../05-install-prometheus-crd.yaml | 0 .../prometheus-service-monitor/10-assert.yaml | 0 .../10-deploy-operator-with-auth.yaml | 2 +- .../prometheus-service-monitor/15-errors.yaml | 0 .../15-undeploy-operator.yaml | 0 .../prometheus-service-monitor/20-assert.yaml | 0 .../20-deploy-operator-without-auth.yaml | 0 .../prometheus-service-monitor/25-errors.yaml | 0 .../25-undeploy-operator.yaml | 0 .../99-delete-ns.yaml | 0 .../helm-nameoverride/15-deploy-operator.yaml | 2 +- .../00-create-creds.yaml | 0 .../04-assert.yaml | 0 .../04-create-cert.yaml | 0 .../05-assert.yaml | 0 .../05-deploy-operator.yaml | 2 +- .../10-assert.yaml | 0 ...0-verify-no-authority-to-read-metrics.yaml | 2 +- .../20-assert.yaml | 0 .../20-verify-crb-with-metrics.yaml | 0 .../90-errors.yaml | 0 .../90-uninstall-operator.yaml | 0 .../99-delete-ns.yaml | 0 .../cert.yaml | 0 .../00-create-creds.yaml | 0 .../05-assert.yaml | 1 - .../05-deploy-operator.yaml | 2 +- .../10-assert.yaml | 0 ...0-verify-no-authority-to-read-metrics.yaml | 0 .../20-assert.yaml | 0 .../20-verify-crb-with-metrics.yaml | 0 .../90-errors.yaml | 0 .../90-uninstall-operator.yaml | 0 .../99-delete-ns.yaml | 0 .../00-create-creds.yaml | 0 .../05-assert.yaml | 1 - .../05-deploy-operator.yaml | 2 +- .../10-assert.yaml | 0 ...0-verify-no-authority-to-read-metrics.yaml | 0 .../15-apply-cluster-role.yaml | 0 .../20-assert.yaml | 0 .../20-verify-crb-with-metrics.yaml | 0 .../90-errors.yaml | 0 .../90-uninstall-operator.yaml | 0 .../99-delete-ns.yaml | 0 .../override-first-deploy.yaml | 2 +- 
.../override-second-deploy.yaml | 2 +- .../from-1.2.0/15-assert.yaml | 1 + .../from-1.3.1/15-assert.yaml | 1 + .../from-1.4.0/15-assert.yaml | 1 + .../from-1.6.0/15-assert.yaml | 1 + .../from-1.7.0/15-assert.yaml | 1 + 73 files changed, 138 insertions(+), 72 deletions(-) create mode 100644 changes/unreleased/Added-20250131-221406.yaml create mode 100644 changes/unreleased/Removed-20250131-221220.yaml create mode 100644 config/rbac/metrics_reader_role_binding.yaml delete mode 100644 helm-charts/verticadb-operator/tests/servicemonitor_test.yaml rename tests/{e2e-leg-5/metrics-auth-proxy-cert => e2e-disabled/prometheus-service-monitor}/00-create-creds.yaml (100%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/05-install-prometheus-crd.yaml (100%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/10-assert.yaml (100%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/10-deploy-operator-with-auth.yaml (88%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/15-errors.yaml (100%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/15-undeploy-operator.yaml (100%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/20-assert.yaml (100%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/20-deploy-operator-without-auth.yaml (100%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/25-errors.yaml (100%) rename tests/{e2e-leg-5 => e2e-disabled}/prometheus-service-monitor/25-undeploy-operator.yaml (100%) rename tests/{e2e-leg-5/metrics-auth-proxy-cert => e2e-disabled/prometheus-service-monitor}/99-delete-ns.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-cert}/00-create-creds.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/04-assert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/04-create-cert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/05-assert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/05-deploy-operator.yaml (89%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/10-assert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-cert}/10-verify-no-authority-to-read-metrics.yaml (99%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/20-assert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/20-verify-crb-with-metrics.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/90-errors.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/90-uninstall-operator.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-cert}/99-delete-ns.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-cert}/cert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-helm}/00-create-creds.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-helm}/05-assert.yaml (98%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-token-helm}/05-deploy-operator.yaml (96%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-token-helm}/10-assert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-cert => metrics-auth-token-helm}/10-verify-no-authority-to-read-metrics.yaml (100%) rename 
tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-token-helm}/20-assert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-token-helm}/20-verify-crb-with-metrics.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-token-helm}/90-errors.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-token-helm}/90-uninstall-operator.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-helm}/99-delete-ns.yaml (100%) rename tests/e2e-leg-5/{prometheus-service-monitor => metrics-auth-token-olm}/00-create-creds.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-token-olm}/05-assert.yaml (98%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-olm}/05-deploy-operator.yaml (95%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-olm}/10-assert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-helm => metrics-auth-token-olm}/10-verify-no-authority-to-read-metrics.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-olm}/15-apply-cluster-role.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-olm}/20-assert.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-olm}/20-verify-crb-with-metrics.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-olm}/90-errors.yaml (100%) rename tests/e2e-leg-5/{metrics-auth-proxy-token-olm => metrics-auth-token-olm}/90-uninstall-operator.yaml (100%) rename tests/e2e-leg-5/{prometheus-service-monitor => metrics-auth-token-olm}/99-delete-ns.yaml (100%) diff --git a/DEVELOPER.md b/DEVELOPER.md index 2d0bac6a8..01def9c95 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -686,7 +686,7 @@ kubectl logs -c vlogger ... - args: - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8443 + - --metrics-bind-address=0.0.0.0:8443 - --leader-elect - --health-probe-bind-address=:8081 - --enable-profiler diff --git a/Makefile b/Makefile index 9fc5e3dfe..d9d57970c 100644 --- a/Makefile +++ b/Makefile @@ -231,9 +231,35 @@ export VDB_MAX_BACKOFF_DURATION # # The address the operators Prometheus metrics endpoint binds to. Setting this # to 0 will disable metric serving. -METRICS_ADDR?=127.0.0.1:8443 +METRICS_ADDR?=0.0.0.0:8443 export METRICS_ADDR # +# The secret name that will be used to mount cert files in the operator +# for providing server certs to Prometheus metrics endpoint. Setting this +# to "" will use an auto-generated self-signed cert. +export METRICS_TLS_SECRET +# +# Controls exposing of the prometheus metrics endpoint. The valid values are: +# EnableWithAuth: A new service object will be created that exposes the +# metrics endpoint. Access to the metrics are controlled by rbac rules. +# The metrics endpoint will use the https scheme. +# EnableWithoutAuth: Like EnableWithAuth, this will create a service +# object to expose the metrics endpoint. However, there is no authority +# checking when using the endpoint. Anyone who had network access +# endpoint (i.e. any pod in k8s) will be able to read the metrics. The +# metrics endpoint will use the http scheme. +# EnableWithTLS: Like EnableWithAuth, this will create a service +# object to expose the metrics endpoint. However, there is no authority +# checking when using the endpoint. People with network access to the +# endpoint (i.e. 
any pod in k8s) and the correct certs can read the metrics. +# The metrics endpoint will use the https scheme. +# It needs to be used with tlsSecret. If tlsSecret is not set, the behavior +# will be similar to EnableWithoutAuth, except that the endpoint will use +# https schema. +# Disable: Prometheus metrics are not exposed at all. +METRICS_EXPOSE_MODE?=Disable +export METRICS_EXPOSE_MODE +# # The minimum logging level. Valid values are: debug, info, warn, and error. LOG_LEVEL?=info export LOG_LEVEL diff --git a/changes/unreleased/Added-20250131-221406.yaml b/changes/unreleased/Added-20250131-221406.yaml new file mode 100644 index 000000000..045eaa1a0 --- /dev/null +++ b/changes/unreleased/Added-20250131-221406.yaml @@ -0,0 +1,6 @@ +kind: Added +body: Added "EnableWithTLS" option to Helm parameter "prometheus.expose", allowing secure access + to metrics from outside the cluster +time: 2025-01-31T22:14:06.675326382Z +custom: + Issue: "1040" diff --git a/changes/unreleased/Removed-20250131-221220.yaml b/changes/unreleased/Removed-20250131-221220.yaml new file mode 100644 index 000000000..db414ee91 --- /dev/null +++ b/changes/unreleased/Removed-20250131-221220.yaml @@ -0,0 +1,5 @@ +kind: Removed +body: Removed Helm parameter "prometheus.createServiceMonitor" +time: 2025-01-31T22:12:20.085253713Z +custom: + Issue: "1040" diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 6159f8290..65c37c2d1 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -18,8 +18,10 @@ package main import ( "context" "crypto/tls" + "crypto/x509" "log" "os" + "strings" "time" // Allows us to pull in things generated from `go generate` @@ -286,10 +288,32 @@ func main() { TLSOpts: webhookTLSOpts, }) - secureMetrics := opcfg.GetMetricsAddr() == "127.0.0.1:8443" + secureMetrics := strings.EqualFold(opcfg.GetMetricsExposeMode(), "EnableWithAuth") + secureByTLS := strings.EqualFold(opcfg.GetMetricsExposeMode(), "EnableWithTLS") var metricCertDir string if opcfg.GetMetricsTLSSecret() != "" { metricCertDir = "/cert" + metricsTLSOpts = append(metricsTLSOpts, func(c *tls.Config) { + // Load the CA certificate + caCert, err := os.ReadFile("/cert/ca.crt") + if err != nil { + log.Fatalf("failed to read CA cert: %v", err) + } + // Create a CertPool and add the CA certificate to it + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(caCert) + if !ok { + log.Fatal("failed to append CA cert to CertPool") + } + c.ClientCAs = caCertPool + // If we enabled authorization, then no client certs are really needed. + // Otherwise, we need the client certs. + if secureMetrics { + c.ClientAuth = tls.VerifyClientCertIfGiven + } else if secureByTLS { + c.ClientAuth = tls.RequireAndVerifyClientCert + } + }) } // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. @@ -297,9 +321,9 @@ func main() { // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/server // - https://book.kubebuilder.io/reference/metrics.html metricsServerOptions := metricsserver.Options{ - BindAddress: ":8443", - SecureServing: secureMetrics, - // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are + BindAddress: opcfg.GetMetricsAddr(), + SecureServing: secureMetrics || secureByTLS, + // TLSOpts is used to allow configuring the TLS config used for the server. If certificates are // not provided, self-signed certificates will be generated by default. 
This option is not recommended for // production environments as self-signed certificates do not offer the same level of trust and security // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 0a3538b30..0b05dc5d2 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -24,7 +24,7 @@ resources: - ../webhook - ../clusterpermissions - ../certmanager -- ../prometheus +# - ../prometheus - metrics_service.yaml # Protect the /metrics endpoint by putting it behind auth. diff --git a/config/manager/operator-envs b/config/manager/operator-envs index df6c60ce4..4e4c4fcab 100644 --- a/config/manager/operator-envs +++ b/config/manager/operator-envs @@ -7,7 +7,7 @@ CONTROLLERS_ENABLED=${CONTROLLERS_ENABLED} CONTROLLERS_SCOPE=${CONTROLLERS_SCOPE} METRICS_ADDR=${METRICS_ADDR} METRICS_TLS_SECRET=${METRICS_TLS_SECRET} -METRICS_PROXY_RBAC=${METRICS_PROXY_RBAC} +METRICS_EXPOSE_MODE=${METRICS_EXPOSE_MODE} LOG_LEVEL=${LOG_LEVEL} CONCURRENCY_VERTICADB=${CONCURRENCY_VERTICADB} CONCURRENCY_VERTICAAUTOSCALER=${CONCURRENCY_VERTICAAUTOSCALER} diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 69bd5461c..b2ecb96b7 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -18,6 +18,7 @@ resources: - metrics_auth_role.yaml - metrics_auth_role_binding.yaml - metrics_reader_role.yaml +- metrics_reader_role_binding.yaml # The next setup the RBAC rules for the webhook. - webhook_config_clusterrole.yaml - webhook_config_clusterrolebinding.yaml diff --git a/config/rbac/metrics_reader_role_binding.yaml b/config/rbac/metrics_reader_role_binding.yaml new file mode 100644 index 000000000..9dfc1ea64 --- /dev/null +++ b/config/rbac/metrics_reader_role_binding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-reader +subjects: +- kind: ServiceAccount + name: manager + namespace: system +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + \ No newline at end of file diff --git a/helm-charts/verticadb-operator/README.md b/helm-charts/verticadb-operator/README.md index 66a7fe9a5..2ec0c5311 100644 --- a/helm-charts/verticadb-operator/README.md +++ b/helm-charts/verticadb-operator/README.md @@ -21,9 +21,8 @@ This helm chart will install the operator and an admission controller webhook. | nodeSelector | The [node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) provides control over which nodes are used to schedule a pod. If this parameter is not set, the node selector is omitted from the pod that is created by the operator's Deployment object. To set this parameter, provide a list of key/value pairs. | Not set | | priorityClassName | The [priority class name](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass) that is assigned to the operator pod. This affects where the pod gets scheduled. | Not set | | prometheus.createProxyRBAC | Set this to false if you want to avoid creating the rbac rules for accessing the metrics endpoint when it is protected by the rbac auth proxy. By default, we will create those RBAC rules. | true | -| prometheus.createServiceMonitor | Set this to true if you want to create a ServiceMonitor. 
This object is a CR provided by the prometheus operator to allow for easy service discovery. If set to true, the prometheus operator must be installed before installing this chart.
See: https://github.com/prometheus-operator/prometheus-operator

*This parameter is deprecated and will be removed in a future release.* | false | -| prometheus.expose | Controls exposing of the prometheus metrics endpoint. Valid options are:

- **EnableWithAuthProxy**: A new service object will be created that exposes the metrics endpoint. Access to the metrics are controlled by rbac rules. The metrics endpoint will use the https scheme.

- **EnableWithoutAuth**: Like EnableWithAuthProxy, this will create a service object to expose the metrics endpoint. However, there is no authority checking when using the endpoint. Anyone who has network access to the endpoint (i.e. any pod in k8s) will be able to read the metrics. The metrics endpoint will use the http scheme.

- **Disable**: Prometheus metrics are not exposed at all. | Disable | -| prometheus.tlsSecret | Use this if you want to provide your own certs for the prometheus metrics endpoint. It refers to a secret in the same namespace that the helm chart is deployed in. The secret must have the following keys set:

- **tls.key** – private key
- **tls.crt** – cert for the private key
- **ca.crt** – CA certificate

The prometheus.expose=EnableWithAuthProxy must be set for the operator to use the certs provided. If this field is omitted, the operator will generate its own self-signed cert. | "" | +| prometheus.expose | Controls exposing of the prometheus metrics endpoint. Valid options are:

- **EnableWithAuth**: A new service object will be created that exposes the metrics endpoint. Access to the metrics is controlled by rbac rules. The metrics endpoint will use the https scheme.

- **EnableWithoutAuth**: Like EnableWithAuth, this will create a service object to expose the metrics endpoint. However, there is no authority checking when using the endpoint. Anyone who has network access to the endpoint (i.e. any pod in k8s) will be able to read the metrics. The metrics endpoint will use the http scheme.

- **EnableWithTLS**: Like EnableWithAuth, this will create a service object to expose the metrics endpoint. However, there is no RBAC authority check on requests; instead, only clients that have network access to the endpoint (i.e., any pod in Kubernetes) and present a client certificate signed by the CA in `tlsSecret` can read the metrics. The metrics endpoint will use HTTPS and must be used with `tlsSecret`. If `tlsSecret` is not set, the behavior will be similar to `EnableWithoutAuth`, except that the endpoint will use HTTPS.

- **Disable**: Prometheus metrics are not exposed at all. | Disable | +| prometheus.tlsSecret | Use this if you want to provide your own certs for the prometheus metrics endpoint. It refers to a secret in the same namespace that the helm chart is deployed in. The secret must have the following keys set:

- **tls.key** – private key
- **tls.crt** – cert for the private key
- **ca.crt** – CA certificate
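
As a minimal sketch of how the `prometheus.expose` and `prometheus.tlsSecret` parameters above fit together: the commands below package a server certificate into a secret under the required key names, deploy the in-repo chart with `EnableWithTLS`, and then read the endpoint with a client certificate signed by the same CA. The namespace, release name, secret name, and certificate file names are placeholders, and the metrics service name assumes the chart's default naming (confirm it with `kubectl get svc` for your release); port 8443 and the key names come from the chart defaults.

```bash
# Hedged example: namespace, release, secret, and cert file names below are
# placeholders, not values mandated by the chart.
NAMESPACE=verticadb-operator

# 1. Package the server cert, key, and CA under the required key names.
kubectl -n "$NAMESPACE" create secret generic operator-metrics-tls \
  --from-file=tls.crt=server.crt \
  --from-file=tls.key=server.key \
  --from-file=ca.crt=ca.crt

# 2. Install the chart with the TLS-protected metrics endpoint enabled.
helm upgrade --install vdb-op ./helm-charts/verticadb-operator \
  -n "$NAMESPACE" \
  --set prometheus.expose=EnableWithTLS \
  --set prometheus.tlsSecret=operator-metrics-tls

# 3. Scrape the endpoint with a client cert signed by the CA in the secret.
#    The service name assumes the default chart naming; verify it first.
curl --cacert ca.crt --cert client.crt --key client.key \
  "https://verticadb-operator-metrics-service.$NAMESPACE.svc:8443/metrics"

# With prometheus.expose=EnableWithAuth, RBAC replaces client certificates:
# a caller whose token is authorized through the metrics-reader ClusterRole
# binding (see scripts/authorize-metrics.sh) can read the endpoint instead.
TOKEN=$(kubectl -n "$NAMESPACE" create token default)
curl -k -H "Authorization: Bearer $TOKEN" \
  "https://verticadb-operator-metrics-service.$NAMESPACE.svc:8443/metrics"
```

With `Disable`, the metrics service is not created at all, so neither request applies.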

The prometheus.expose=EnableWithAuth must be set for the operator to use the certs provided. If this field is omitted, the operator will generate its own self-signed cert. | "" | | reconcileConcurrency.eventtrigger | Set this to control the concurrency of reconciliations of EventTrigger CRs | 1 | | reconcileConcurrency.sandboxconfigmap | Set this to control the concurrency of reconciliations of ConfigMaps that contain state for a sandbox | 1 | | reconcileConcurrency.verticaautoscaler | Set this to control the concurrency of reconciliations of VerticaAutoscaler CRs | 1 | diff --git a/helm-charts/verticadb-operator/tests/image-name-and-tag_test.yaml b/helm-charts/verticadb-operator/tests/image-name-and-tag_test.yaml index 82820f637..a43b57a27 100644 --- a/helm-charts/verticadb-operator/tests/image-name-and-tag_test.yaml +++ b/helm-charts/verticadb-operator/tests/image-name-and-tag_test.yaml @@ -21,7 +21,7 @@ tests: imagePullSecrets: - name: image-pull-secrets prometheus: - expose: EnableWithAuthProxy + expose: EnableWithAuth asserts: - equal: path: spec.template.spec.containers[0].image diff --git a/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml b/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml index 81b626737..99f4e3b4d 100644 --- a/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml +++ b/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml @@ -13,8 +13,8 @@ tests: - it: should cotain ip if expose is with auth set: prometheus: - expose: EnableWithAuthProxy + expose: EnableWithAuth asserts: - equal: path: data.METRICS_ADDR - value: 127.0.0.1:8443 + value: 0.0.0.0:8443 diff --git a/helm-charts/verticadb-operator/tests/serviceaccount-rolebinding_test.yaml b/helm-charts/verticadb-operator/tests/serviceaccount-rolebinding_test.yaml index 16b7d0a6f..2820c3649 100644 --- a/helm-charts/verticadb-operator/tests/serviceaccount-rolebinding_test.yaml +++ b/helm-charts/verticadb-operator/tests/serviceaccount-rolebinding_test.yaml @@ -2,13 +2,15 @@ suite: ServiceAccount tests templates: - verticadb-operator-manager-clusterrolebinding-crb.yaml - verticadb-operator-webhook-config-crb.yaml + - verticadb-operator-metrics-auth-rolebinding-crb.yaml + - verticadb-operator-metrics-reader-crb.yaml - verticadb-operator-leader-election-rolebinding-rb.yaml tests: - it: should include the serviceaccount name when an override is set set: serviceAccountNameOverride: special-override-sa prometheus: - expose: "EnableWithAuthProxy" + expose: "EnableWithAuth" createProxyRBAC: true asserts: - equal: diff --git a/helm-charts/verticadb-operator/tests/servicemonitor_test.yaml b/helm-charts/verticadb-operator/tests/servicemonitor_test.yaml deleted file mode 100644 index 9e9d2d133..000000000 --- a/helm-charts/verticadb-operator/tests/servicemonitor_test.yaml +++ /dev/null @@ -1,22 +0,0 @@ -suite: ServiceMonitor tests -templates: - - verticadb-operator-metrics-monitor-servicemonitor.yaml -tests: - - it: should not include ServiceMonitor by default - set: - prometheus: - createServiceMonitor: false - asserts: - - hasDocuments: - count: 0 - - it: should include ServiceMonitor if value is set - set: - prometheus: - createServiceMonitor: true - asserts: - - hasDocuments: - count: 1 - - isKind: - of: ServiceMonitor - - diff --git a/helm-charts/verticadb-operator/values.yaml b/helm-charts/verticadb-operator/values.yaml index fe38b8523..b3727167f 100644 --- a/helm-charts/verticadb-operator/values.yaml +++ b/helm-charts/verticadb-operator/values.yaml @@ -151,30 +151,32 @@ 
serviceAccountAnnotations: {} prometheus: # Controls exposing of the prometheus metrics endpoint. Valid options are: # - # EnableWithAuthProxy: A new service object will be created that exposes the + # EnableWithAuth: A new service object will be created that exposes the # metrics endpoint. Access to the metrics are controlled by rbac rules. # The metrics endpoint will use the https scheme. - # EnableWithoutAuth: Like EnableWithAuthProxy, this will create a service + # EnableWithoutAuth: Like EnableWithAuth, this will create a service # object to expose the metrics endpoint. However, there is no authority # checking when using the endpoint. Anyone who had network access # endpoint (i.e. any pod in k8s) will be able to read the metrics. The # metrics endpoint will use the http scheme. + # EnableWithTLS: Like EnableWithAuth, this will create a service + # object to expose the metrics endpoint. However, there is no authority + # checking when using the endpoint. People with network access to the + # endpoint (i.e. any pod in k8s) and the correct certs can read the metrics. + # The metrics endpoint will use the https scheme. + # It needs to be used with tlsSecret. If tlsSecret is not set, the behavior + # will be similar to EnableWithoutAuth, except that the endpoint will use + # https schema. # Disable: Prometheus metrics are not exposed at all. expose: Disable - # If prometheus is exposed with an auth proxy (EnableWithAuthProxy), use this + # If prometheus is exposed with an auth proxy (EnableWithAuth), use this # parameter to control what certificates are used for the https endpoint. If # this is empty, the operator will use a generated self-signed cert. When # provided, the certificates can be used to authenticate with the metrics # endpoint. tlsSecret: "" - # ** This parameter is deprecated and will be removed in a future release. - # Set this to true if you want to create a ServiceMonitor. This object is a - # CR provided by the prometheus operator to allow for easy service discovery. - # https://github.com/prometheus-operator/prometheus-operator - createServiceMonitor: false - # This controls the creation of ClusterRole/ClusterRoleBinding to access # the metrics endpoint. createProxyRBAC: true diff --git a/pkg/opcfg/config.go b/pkg/opcfg/config.go index fc8b28d11..30ca47189 100644 --- a/pkg/opcfg/config.go +++ b/pkg/opcfg/config.go @@ -104,6 +104,11 @@ func GetMetricsTLSSecret() string { return lookupStringEnvVar("METRICS_TLS_SECRET", envCanNotExist) } +// GetMetricsExposeMode returns exposing mode of the manager's Prometheus endpoint. +func GetMetricsExposeMode() string { + return lookupStringEnvVar("METRICS_EXPOSE_MODE", envCanNotExist) +} + // GetUseCertManager returns true if cert-manager is used to setup the webhook's // TLS certs. 
func GetUseCertManager() bool { diff --git a/scripts/authorize-metrics.sh b/scripts/authorize-metrics.sh index c85df9782..f06e503b2 100755 --- a/scripts/authorize-metrics.sh +++ b/scripts/authorize-metrics.sh @@ -63,6 +63,7 @@ set -o xtrace if [[ -n "$UNDO" ]] then kubectl delete -f $REPO_DIR/config/release-manifests/verticadb-operator-metrics-reader-cr.yaml || : + kubectl delete -f $REPO_DIR/config/release-manifests/verticadb-operator-metrics-reader-crb.yaml || : echo "Finished undoing action" exit 0 fi diff --git a/scripts/gen-csv.sh b/scripts/gen-csv.sh index cdfae2f14..b4d9b7f86 100755 --- a/scripts/gen-csv.sh +++ b/scripts/gen-csv.sh @@ -95,11 +95,11 @@ perl -i -0777 -pe 's/\n\s*- mountPath: \/cert\s*\n\s*name: auth-cert//g' bundle/ # requirement on having the Prometheus Operator installed. We are only # optionally installing this. We will include the manifest in our GitHub # artifacts and have it as an optional helm parameter. -rm bundle/manifests/*servicemonitor.yaml +rm -f bundle/manifests/*servicemonitor.yaml # Remove the metrics-reader clusterrolebinding. When undeploying olm installs, # the clusterrole would get removed but not the clusterrolebinding. We provide # this as an arifact anyway, so it doesn't need to be part of the bundle. -rm bundle/manifests/*metrics-reader*yaml +rm -f bundle/manifests/*metrics-reader*yaml # Add the supported versions at the end of annotations.yaml cat <> bundle/metadata/annotations.yaml diff --git a/scripts/gen-release-artifacts.sh b/scripts/gen-release-artifacts.sh index 542f49bcb..182403c71 100755 --- a/scripts/gen-release-artifacts.sh +++ b/scripts/gen-release-artifacts.sh @@ -52,7 +52,7 @@ fi # command. RELEASE_ARTIFACT_TARGET_DIR=$REPO_DIR/config/release-manifests mkdir -p $RELEASE_ARTIFACT_TARGET_DIR -for f in verticadb-operator-metrics-monitor-servicemonitor.yaml \ +for f in verticadb-operator-metrics-reader-crb.yaml \ verticadb-operator-metrics-reader-cr.yaml do cp $MANIFEST_DIR/$f $RELEASE_ARTIFACT_TARGET_DIR diff --git a/scripts/template-helm-chart.sh b/scripts/template-helm-chart.sh index b8b109f6b..3d2b50a2a 100755 --- a/scripts/template-helm-chart.sh +++ b/scripts/template-helm-chart.sh @@ -94,7 +94,8 @@ for f in \ verticadb-operator-leader-election-rolebinding-rb.yaml \ verticadb-operator-manager-clusterrolebinding-crb.yaml \ verticadb-operator-webhook-config-crb.yaml \ - verticadb-operator-metrics-auth-rolebinding-crb.yaml + verticadb-operator-metrics-auth-rolebinding-crb.yaml \ + verticadb-operator-metrics-reader-crb.yaml do perl -i -0777 -pe 's/kind: ServiceAccount\n.*name: .*/kind: ServiceAccount\n name: {{ include "vdb-op.serviceAccount" . }}/g' $TEMPLATE_DIR/$f done @@ -120,28 +121,26 @@ perl -i -pe 's/^/{{- if hasPrefix "Enable" .Values.prometheus.expose -}}\n/ if 1 echo "{{- end }}" >> $TEMPLATE_DIR/verticadb-operator-metrics-service-svc.yaml # 12. Template the roles/rolebindings for access to prometheus metrics -for f in verticadb-operator-metrics-reader-cr.yaml +for f in verticadb-operator-metrics-reader-cr.yaml \ + verticadb-operator-metrics-reader-crb.yaml do - perl -i -pe 's/^/{{- if and (.Values.prometheus.createProxyRBAC) (eq .Values.prometheus.expose "EnableWithAuthProxy") -}}\n/ if 1 .. 1' $TEMPLATE_DIR/$f + perl -i -pe 's/^/{{- if and (.Values.prometheus.createProxyRBAC) (eq .Values.prometheus.expose "EnableWithAuth") -}}\n/ if 1 .. 1' $TEMPLATE_DIR/$f echo "{{- end }}" >> $TEMPLATE_DIR/$f perl -i -0777 -pe 's/-metrics-reader/-{{ include "vdb-op.metricsRbacPrefix" . 
}}metrics-reader/g' $TEMPLATE_DIR/$f perl -i -0777 -pe 's/-(proxy-role.*)/-{{ include "vdb-op.metricsRbacPrefix" . }}$1/g' $TEMPLATE_DIR/$f done -# 13. Template the ServiceMonitor object for Promtheus operator -perl -i -pe 's/^/{{- if .Values.prometheus.createServiceMonitor -}}\n/ if 1 .. 1' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml -echo "{{- end }}" >> $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml -perl -i -0777 -pe 's/(.*endpoints:)/$1\n{{- if eq "EnableWithAuthProxy" .Values.prometheus.expose }}/g' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml -perl -i -0777 -pe 's/(.*insecureSkipVerify:.*)/$1\n{{- else }}\n - path: \/metrics\n port: metrics\n scheme: http\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-metrics-monitor-servicemonitor.yaml +# 13. Template the metrics bind address +perl -i -0777 -pe 's/(METRICS_ADDR: )(.*)/$1 "{{ if eq "EnableWithAuth" .Values.prometheus.expose }}0.0.0.0{{ end }}:8443"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml +perl -i -0777 -pe 's/(.*METRICS_ADDR:.*)/{{- if hasPrefix "Enable" .Values.prometheus.expose }}\n$1\n{{- else }}\n METRICS_ADDR: "0"\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml -# 14. Template the metrics bind address +# 14. Template other metrics attributes perl -i -0777 -pe 's/(METRICS_TLS_SECRET: )(.*)/$1 "{{ .Values.prometheus.tlsSecret }}"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml -perl -i -0777 -pe 's/(METRICS_ADDR: )(.*)/$1 "{{ if eq "EnableWithAuthProxy" .Values.prometheus.expose }}127.0.0.1{{ end }}:8443"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml -perl -i -0777 -pe 's/(.*METRICS_ADDR:.*)/{{- if hasPrefix "Enable" .Values.prometheus.expose }}\n$1\n{{- else }}\n METRICS_ADDR: "0"\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml perl -i -0777 -pe 's/(.*ports:\n.*containerPort: 9443\n.*webhook-server.*\n.*)/$1\n{{- if hasPrefix "Enable" .Values.prometheus.expose }}\n - name: metrics\n containerPort: 8443\n protocol: TCP\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml +perl -i -0777 -pe 's/(METRICS_EXPOSE_MODE: )(.*)/$1 "{{ .Values.prometheus.expose }}"/' $TEMPLATE_DIR/verticadb-operator-manager-config-cm.yaml # 15. Template the rbac container -perl -i -0777 -pe 's/(.*- args:.*\n.*secure)/{{- if eq .Values.prometheus.expose "EnableWithAuthProxy" }}\n$1/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml +perl -i -0777 -pe 's/(.*- args:.*\n.*secure)/{{- if eq .Values.prometheus.expose "EnableWithAuth" }}\n$1/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml # We need to put the matching end at the end of the container spec. perl -i -0777 -pe 's/(memory: 64Mi)/$1\n{{- end }}/g' $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml @@ -153,7 +152,6 @@ perl -i -0777 -pe 's/verticadb-operator/{{ include "vdb-op.name" . }}/g' $TEMPLA # 17. 
Mount TLS certs for prometheus metrics for f in $TEMPLATE_DIR/verticadb-operator-manager-deployment.yaml do - perl -i -0777 -pe 's/(.*--v=[0-9]+)/$1\n{{- if not (empty .Values.prometheus.tlsSecret) }}\n - --tls-cert-file=\/cert\/tls.crt\n - --tls-private-key-file=\/cert\/tls.key\n - --client-ca-file=\/cert\/ca.crt\n{{- end }}/g' $f perl -i -0777 -pe 's/(.*- mountPath: .*\n.*name: auth-cert.*)/\{\{- if not (empty .Values.prometheus.tlsSecret) }}\n - mountPath: \/cert\n name: auth-cert\n{{- end }}/g' $f perl -i -0777 -pe 's/(.*- name: auth-cert.*\n.*secret:.*\n.*defaultMode: 420.*\n.*secretName: custom-cert)/{{- if not \(empty .Values.prometheus.tlsSecret\) }}\n - name: auth-cert\n secret:\n defaultMode: 420\n secretName: {{ .Values.prometheus.tlsSecret }}\n{{- end }}/g' $f done diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/00-create-creds.yaml b/tests/e2e-disabled/prometheus-service-monitor/00-create-creds.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/00-create-creds.yaml rename to tests/e2e-disabled/prometheus-service-monitor/00-create-creds.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/05-install-prometheus-crd.yaml b/tests/e2e-disabled/prometheus-service-monitor/05-install-prometheus-crd.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/05-install-prometheus-crd.yaml rename to tests/e2e-disabled/prometheus-service-monitor/05-install-prometheus-crd.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/10-assert.yaml b/tests/e2e-disabled/prometheus-service-monitor/10-assert.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/10-assert.yaml rename to tests/e2e-disabled/prometheus-service-monitor/10-assert.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/10-deploy-operator-with-auth.yaml b/tests/e2e-disabled/prometheus-service-monitor/10-deploy-operator-with-auth.yaml similarity index 88% rename from tests/e2e-leg-5/prometheus-service-monitor/10-deploy-operator-with-auth.yaml rename to tests/e2e-disabled/prometheus-service-monitor/10-deploy-operator-with-auth.yaml index 6b1a7b643..8ffb5f5a7 100644 --- a/tests/e2e-leg-5/prometheus-service-monitor/10-deploy-operator-with-auth.yaml +++ b/tests/e2e-disabled/prometheus-service-monitor/10-deploy-operator-with-auth.yaml @@ -15,4 +15,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - command: sh -c "cd ../../.. && make undeploy-operator || true" - - command: sh -c "cd ../../.. && DEPLOY_WITH=helm make deploy-operator NAMESPACE=$NAMESPACE HELM_OVERRIDES='--set prometheus.expose=EnableWithAuthProxy --set prometheus.createServiceMonitor=true --set webhook.certSource=cert-manager'" + - command: sh -c "cd ../../.. 
&& DEPLOY_WITH=helm make deploy-operator NAMESPACE=$NAMESPACE HELM_OVERRIDES='--set prometheus.expose=EnableWithAuth --set prometheus.createServiceMonitor=true --set webhook.certSource=cert-manager'" diff --git a/tests/e2e-leg-5/prometheus-service-monitor/15-errors.yaml b/tests/e2e-disabled/prometheus-service-monitor/15-errors.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/15-errors.yaml rename to tests/e2e-disabled/prometheus-service-monitor/15-errors.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/15-undeploy-operator.yaml b/tests/e2e-disabled/prometheus-service-monitor/15-undeploy-operator.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/15-undeploy-operator.yaml rename to tests/e2e-disabled/prometheus-service-monitor/15-undeploy-operator.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/20-assert.yaml b/tests/e2e-disabled/prometheus-service-monitor/20-assert.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/20-assert.yaml rename to tests/e2e-disabled/prometheus-service-monitor/20-assert.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/20-deploy-operator-without-auth.yaml b/tests/e2e-disabled/prometheus-service-monitor/20-deploy-operator-without-auth.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/20-deploy-operator-without-auth.yaml rename to tests/e2e-disabled/prometheus-service-monitor/20-deploy-operator-without-auth.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/25-errors.yaml b/tests/e2e-disabled/prometheus-service-monitor/25-errors.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/25-errors.yaml rename to tests/e2e-disabled/prometheus-service-monitor/25-errors.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/25-undeploy-operator.yaml b/tests/e2e-disabled/prometheus-service-monitor/25-undeploy-operator.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/25-undeploy-operator.yaml rename to tests/e2e-disabled/prometheus-service-monitor/25-undeploy-operator.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/99-delete-ns.yaml b/tests/e2e-disabled/prometheus-service-monitor/99-delete-ns.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/99-delete-ns.yaml rename to tests/e2e-disabled/prometheus-service-monitor/99-delete-ns.yaml diff --git a/tests/e2e-leg-5/helm-nameoverride/15-deploy-operator.yaml b/tests/e2e-leg-5/helm-nameoverride/15-deploy-operator.yaml index f4687a2b7..b5059763b 100644 --- a/tests/e2e-leg-5/helm-nameoverride/15-deploy-operator.yaml +++ b/tests/e2e-leg-5/helm-nameoverride/15-deploy-operator.yaml @@ -15,4 +15,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - command: sh -c "cd ../../.. && make undeploy-operator || true" - - command: sh -c "cd ../../.. && DEPLOY_WITH=helm HELM_OVERRIDES='--set nameOverride=matt,prometheus.expose=EnableWithAuthProxy' make deploy-operator NAMESPACE=$NAMESPACE" + - command: sh -c "cd ../../.. 
&& DEPLOY_WITH=helm HELM_OVERRIDES='--set nameOverride=matt,prometheus.expose=EnableWithAuth' make deploy-operator NAMESPACE=$NAMESPACE" diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/00-create-creds.yaml b/tests/e2e-leg-5/metrics-auth-cert/00-create-creds.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/00-create-creds.yaml rename to tests/e2e-leg-5/metrics-auth-cert/00-create-creds.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/04-assert.yaml b/tests/e2e-leg-5/metrics-auth-cert/04-assert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/04-assert.yaml rename to tests/e2e-leg-5/metrics-auth-cert/04-assert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/04-create-cert.yaml b/tests/e2e-leg-5/metrics-auth-cert/04-create-cert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/04-create-cert.yaml rename to tests/e2e-leg-5/metrics-auth-cert/04-create-cert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/05-assert.yaml b/tests/e2e-leg-5/metrics-auth-cert/05-assert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/05-assert.yaml rename to tests/e2e-leg-5/metrics-auth-cert/05-assert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/05-deploy-operator.yaml b/tests/e2e-leg-5/metrics-auth-cert/05-deploy-operator.yaml similarity index 89% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/05-deploy-operator.yaml rename to tests/e2e-leg-5/metrics-auth-cert/05-deploy-operator.yaml index 159938529..5d0e18e5a 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-cert/05-deploy-operator.yaml +++ b/tests/e2e-leg-5/metrics-auth-cert/05-deploy-operator.yaml @@ -15,4 +15,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - command: sh -c "cd ../../.. && make undeploy-operator || true" - - command: sh -c "cd ../../.. && make deploy-operator DEPLOY_WITH=helm NAMESPACE=$NAMESPACE HELM_OVERRIDES='--set prometheus.tlsSecret=custom-cert,prometheus.expose=EnableWithAuthProxy,serviceAccountNameOverride=special-sa'" + - command: sh -c "cd ../../.. 
&& make deploy-operator DEPLOY_WITH=helm NAMESPACE=$NAMESPACE HELM_OVERRIDES='--set prometheus.tlsSecret=custom-cert,prometheus.expose=EnableWithTLS,serviceAccountNameOverride=special-sa'" diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/10-assert.yaml b/tests/e2e-leg-5/metrics-auth-cert/10-assert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/10-assert.yaml rename to tests/e2e-leg-5/metrics-auth-cert/10-assert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/10-verify-no-authority-to-read-metrics.yaml b/tests/e2e-leg-5/metrics-auth-cert/10-verify-no-authority-to-read-metrics.yaml similarity index 99% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/10-verify-no-authority-to-read-metrics.yaml rename to tests/e2e-leg-5/metrics-auth-cert/10-verify-no-authority-to-read-metrics.yaml index ecfc6bba6..ff9c34511 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/10-verify-no-authority-to-read-metrics.yaml +++ b/tests/e2e-leg-5/metrics-auth-cert/10-verify-no-authority-to-read-metrics.yaml @@ -29,7 +29,7 @@ data: SVC_NAME=verticadb-operator-metrics-service # We are expecting an error while trying to access metrics endpoint curl --insecure https://$SVC_NAME:8443/metrics 2>&1 || true - curl --insecure https://$SVC_NAME:8443/metrics 2>&1 | grep 'Unauthorized' + curl --insecure https://$SVC_NAME:8443/metrics 2>&1 | grep 'errno' --- apiVersion: v1 kind: Pod diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/20-assert.yaml b/tests/e2e-leg-5/metrics-auth-cert/20-assert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/20-assert.yaml rename to tests/e2e-leg-5/metrics-auth-cert/20-assert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/20-verify-crb-with-metrics.yaml b/tests/e2e-leg-5/metrics-auth-cert/20-verify-crb-with-metrics.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/20-verify-crb-with-metrics.yaml rename to tests/e2e-leg-5/metrics-auth-cert/20-verify-crb-with-metrics.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/90-errors.yaml b/tests/e2e-leg-5/metrics-auth-cert/90-errors.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/90-errors.yaml rename to tests/e2e-leg-5/metrics-auth-cert/90-errors.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/90-uninstall-operator.yaml b/tests/e2e-leg-5/metrics-auth-cert/90-uninstall-operator.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/90-uninstall-operator.yaml rename to tests/e2e-leg-5/metrics-auth-cert/90-uninstall-operator.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/99-delete-ns.yaml b/tests/e2e-leg-5/metrics-auth-cert/99-delete-ns.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/99-delete-ns.yaml rename to tests/e2e-leg-5/metrics-auth-cert/99-delete-ns.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/cert.yaml b/tests/e2e-leg-5/metrics-auth-cert/cert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/cert.yaml rename to tests/e2e-leg-5/metrics-auth-cert/cert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/00-create-creds.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/00-create-creds.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/00-create-creds.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/00-create-creds.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-assert.yaml 
b/tests/e2e-leg-5/metrics-auth-token-helm/05-assert.yaml similarity index 98% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-assert.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/05-assert.yaml index 9fb612105..5c9021b60 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-assert.yaml +++ b/tests/e2e-leg-5/metrics-auth-token-helm/05-assert.yaml @@ -23,7 +23,6 @@ status: phase: Running containerStatuses: - ready: true - - ready: true --- apiVersion: v1 kind: Service diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-deploy-operator.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/05-deploy-operator.yaml similarity index 96% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-deploy-operator.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/05-deploy-operator.yaml index 7ebbf8bb8..70c5a8973 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-deploy-operator.yaml +++ b/tests/e2e-leg-5/metrics-auth-token-helm/05-deploy-operator.yaml @@ -15,4 +15,4 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - command: sh -c "cd ../../.. && make undeploy-operator || true" - - command: sh -c "cd ../../.. && make deploy-operator NAMESPACE=$NAMESPACE DEPLOY_WITH=helm HELM_OVERRIDES='--set prometheus.expose=EnableWithAuthProxy'" + - command: sh -c "cd ../../.. && make deploy-operator NAMESPACE=$NAMESPACE DEPLOY_WITH=helm HELM_OVERRIDES='--set prometheus.expose=EnableWithAuth'" diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/10-assert.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/10-assert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/10-assert.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/10-assert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-cert/10-verify-no-authority-to-read-metrics.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/10-verify-no-authority-to-read-metrics.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-cert/10-verify-no-authority-to-read-metrics.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/10-verify-no-authority-to-read-metrics.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/20-assert.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/20-assert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/20-assert.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/20-assert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/20-verify-crb-with-metrics.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/20-verify-crb-with-metrics.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/20-verify-crb-with-metrics.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/20-verify-crb-with-metrics.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/90-errors.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/90-errors.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/90-errors.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/90-errors.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/90-uninstall-operator.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/90-uninstall-operator.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/90-uninstall-operator.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/90-uninstall-operator.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/99-delete-ns.yaml b/tests/e2e-leg-5/metrics-auth-token-helm/99-delete-ns.yaml 
similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/99-delete-ns.yaml rename to tests/e2e-leg-5/metrics-auth-token-helm/99-delete-ns.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/00-create-creds.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/00-create-creds.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/00-create-creds.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/00-create-creds.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-assert.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/05-assert.yaml similarity index 98% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-assert.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/05-assert.yaml index 9fb612105..5c9021b60 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/05-assert.yaml +++ b/tests/e2e-leg-5/metrics-auth-token-olm/05-assert.yaml @@ -23,7 +23,6 @@ status: phase: Running containerStatuses: - ready: true - - ready: true --- apiVersion: v1 kind: Service diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-deploy-operator.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/05-deploy-operator.yaml similarity index 95% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-deploy-operator.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/05-deploy-operator.yaml index baf62aefe..365fd16c4 100644 --- a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/05-deploy-operator.yaml +++ b/tests/e2e-leg-5/metrics-auth-token-olm/05-deploy-operator.yaml @@ -15,5 +15,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - command: sh -c "cd ../../.. && make undeploy-operator || true" - - command: sh -c "cd ../../.. && make setup-olm DEPLOY_WITH=olm" + - command: sh -c "cd ../../.. && make setup-olm DEPLOY_WITH=olm METRICS_EXPOSE_MODE=EnableWithAuth" - command: sh -c "cd ../../.. 
&& make deploy-operator DEPLOY_WITH=olm" diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/10-assert.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/10-assert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/10-assert.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/10-assert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-helm/10-verify-no-authority-to-read-metrics.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/10-verify-no-authority-to-read-metrics.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-helm/10-verify-no-authority-to-read-metrics.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/10-verify-no-authority-to-read-metrics.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/15-apply-cluster-role.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/15-apply-cluster-role.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/15-apply-cluster-role.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/15-apply-cluster-role.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/20-assert.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/20-assert.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/20-assert.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/20-assert.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/20-verify-crb-with-metrics.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/20-verify-crb-with-metrics.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/20-verify-crb-with-metrics.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/20-verify-crb-with-metrics.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/90-errors.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/90-errors.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/90-errors.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/90-errors.yaml diff --git a/tests/e2e-leg-5/metrics-auth-proxy-token-olm/90-uninstall-operator.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/90-uninstall-operator.yaml similarity index 100% rename from tests/e2e-leg-5/metrics-auth-proxy-token-olm/90-uninstall-operator.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/90-uninstall-operator.yaml diff --git a/tests/e2e-leg-5/prometheus-service-monitor/99-delete-ns.yaml b/tests/e2e-leg-5/metrics-auth-token-olm/99-delete-ns.yaml similarity index 100% rename from tests/e2e-leg-5/prometheus-service-monitor/99-delete-ns.yaml rename to tests/e2e-leg-5/metrics-auth-token-olm/99-delete-ns.yaml diff --git a/tests/e2e-leg-5/operator-pod-scheduling/override-first-deploy.yaml b/tests/e2e-leg-5/operator-pod-scheduling/override-first-deploy.yaml index 8c8d34a9b..a7e1bd13a 100644 --- a/tests/e2e-leg-5/operator-pod-scheduling/override-first-deploy.yaml +++ b/tests/e2e-leg-5/operator-pod-scheduling/override-first-deploy.yaml @@ -24,4 +24,4 @@ affinity: - another-node-label-value # Expose prometheus to get the rbac sidecar prometheus: - expose: EnableWithAuthProxy + expose: EnableWithAuth diff --git a/tests/e2e-leg-5/operator-pod-scheduling/override-second-deploy.yaml b/tests/e2e-leg-5/operator-pod-scheduling/override-second-deploy.yaml index 6f908584e..f3278fc3c 100644 --- a/tests/e2e-leg-5/operator-pod-scheduling/override-second-deploy.yaml +++ b/tests/e2e-leg-5/operator-pod-scheduling/override-second-deploy.yaml @@ -22,5 +22,5 @@ tolerations: effect: "NoSchedule" # Expose prometheus to get the rbac sidecar 
prometheus: - expose: EnableWithAuthProxy + expose: EnableWithAuth diff --git a/tests/e2e-operator-upgrade-template/from-1.2.0/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.2.0/15-assert.yaml index 4c356678e..56a82472d 100644 --- a/tests/e2e-operator-upgrade-template/from-1.2.0/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.2.0/15-assert.yaml @@ -20,5 +20,6 @@ spec: containers: - name: manager image: vertica/verticadb-operator:1.2.0 + - name: kube-rbac-proxy status: phase: Running diff --git a/tests/e2e-operator-upgrade-template/from-1.3.1/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.3.1/15-assert.yaml index 1584c986a..5ce70abee 100644 --- a/tests/e2e-operator-upgrade-template/from-1.3.1/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.3.1/15-assert.yaml @@ -20,5 +20,6 @@ spec: containers: - name: manager image: vertica/verticadb-operator:1.3.1 + - name: kube-rbac-proxy status: phase: Running diff --git a/tests/e2e-operator-upgrade-template/from-1.4.0/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.4.0/15-assert.yaml index ffec13e44..69a34d190 100644 --- a/tests/e2e-operator-upgrade-template/from-1.4.0/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.4.0/15-assert.yaml @@ -20,5 +20,6 @@ spec: containers: - name: manager image: vertica/verticadb-operator:1.4.0 + - name: kube-rbac-proxy status: phase: Running diff --git a/tests/e2e-operator-upgrade-template/from-1.6.0/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.6.0/15-assert.yaml index 3f52d2724..2377c9990 100644 --- a/tests/e2e-operator-upgrade-template/from-1.6.0/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.6.0/15-assert.yaml @@ -20,5 +20,6 @@ spec: containers: - name: manager image: docker.io/vertica/verticadb-operator:1.6.0 + - name: kube-rbac-proxy status: phase: Running diff --git a/tests/e2e-operator-upgrade-template/from-1.7.0/15-assert.yaml b/tests/e2e-operator-upgrade-template/from-1.7.0/15-assert.yaml index 26c0d20d7..29ef828aa 100644 --- a/tests/e2e-operator-upgrade-template/from-1.7.0/15-assert.yaml +++ b/tests/e2e-operator-upgrade-template/from-1.7.0/15-assert.yaml @@ -20,5 +20,6 @@ spec: containers: - name: manager image: docker.io/vertica/verticadb-operator:1.7.0 + - name: kube-rbac-proxy status: phase: Running From d831f4d0690df3cd69a293affb64c1315118a3d0 Mon Sep 17 00:00:00 2001 From: Cai Chen Date: Wed, 5 Feb 2025 18:01:17 +0000 Subject: [PATCH 15/15] addressed the comments --- cmd/operator/main.go | 11 +++--- .../tests/metrics-cert_test.yaml | 37 +++++++++++++++++++ .../tests/metrics-configmap_test.yaml | 2 +- pkg/opcfg/config.go | 11 ++++++ 4 files changed, 54 insertions(+), 7 deletions(-) create mode 100644 helm-charts/verticadb-operator/tests/metrics-cert_test.yaml diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 65c37c2d1..f0d62d334 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -21,7 +21,6 @@ import ( "crypto/x509" "log" "os" - "strings" "time" // Allows us to pull in things generated from `go generate` @@ -288,8 +287,8 @@ func main() { TLSOpts: webhookTLSOpts, }) - secureMetrics := strings.EqualFold(opcfg.GetMetricsExposeMode(), "EnableWithAuth") - secureByTLS := strings.EqualFold(opcfg.GetMetricsExposeMode(), "EnableWithTLS") + secureByAuth := opcfg.IfSecureByAuth() + secureByTLS := opcfg.IfSecureByTLS() var metricCertDir string if opcfg.GetMetricsTLSSecret() != "" { metricCertDir = "/cert" @@ -308,7 +307,7 @@ func main() { c.ClientCAs = caCertPool // If we 
enabled authorization, then no client certs are really needed. // Otherwise, we need the client certs. - if secureMetrics { + if secureByAuth { c.ClientAuth = tls.VerifyClientCertIfGiven } else if secureByTLS { c.ClientAuth = tls.RequireAndVerifyClientCert @@ -322,7 +321,7 @@ func main() { // - https://book.kubebuilder.io/reference/metrics.html metricsServerOptions := metricsserver.Options{ BindAddress: opcfg.GetMetricsAddr(), - SecureServing: secureMetrics || secureByTLS, + SecureServing: secureByAuth || secureByTLS, // TLSOpts is used to allow configuring the TLS config used for the server. If certificates are // not provided, self-signed certificates will be generated by default. This option is not recommended for // production environments as self-signed certificates do not offer the same level of trust and security @@ -333,7 +332,7 @@ func main() { CertDir: metricCertDir, } - if secureMetrics { + if secureByAuth { // FilterProvider is used to protect the metrics endpoint with authn/authz. // These configurations ensure that only authorized users and service accounts // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: diff --git a/helm-charts/verticadb-operator/tests/metrics-cert_test.yaml b/helm-charts/verticadb-operator/tests/metrics-cert_test.yaml new file mode 100644 index 000000000..fa9a43f3d --- /dev/null +++ b/helm-charts/verticadb-operator/tests/metrics-cert_test.yaml @@ -0,0 +1,37 @@ +suite: Metrics certificate tests +templates: + - verticadb-operator-manager-deployment.yaml +tests: + - it: should include the cert if prometheus.tlsSecret is set + set: + prometheus: + expose: EnableWithAuth + tlsSecret: my-secret + asserts: + - equal: + path: spec.template.spec.containers[0].volumeMounts[1] + value: + name: auth-cert + mountPath: /cert + - equal: + path: spec.template.spec.volumes[0] + value: + name: auth-cert + secret: + defaultMode: 420 + secretName: my-secret + - it: should not include the cert if prometheus.tlsSecret is not set + set: + prometheus: + expose: EnableWithAuth + tlsSecret: "" + asserts: + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: auth-cert + mountPath: /cert + - notContains: + path: spec.template.spec.volumes + content: + name: auth-cert \ No newline at end of file diff --git a/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml b/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml index 99f4e3b4d..dcbe938fe 100644 --- a/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml +++ b/helm-charts/verticadb-operator/tests/metrics-configmap_test.yaml @@ -10,7 +10,7 @@ tests: - equal: path: data.METRICS_ADDR value: :8443 - - it: should cotain ip if expose is with auth + - it: should contain ip if expose is with auth set: prometheus: expose: EnableWithAuth diff --git a/pkg/opcfg/config.go b/pkg/opcfg/config.go index 30ca47189..62123dd1a 100644 --- a/pkg/opcfg/config.go +++ b/pkg/opcfg/config.go @@ -20,6 +20,7 @@ import ( "log" "os" "strconv" + "strings" "time" "github.com/go-logr/logr" @@ -109,6 +110,16 @@ func GetMetricsExposeMode() string { return lookupStringEnvVar("METRICS_EXPOSE_MODE", envCanNotExist) } +// IfSecureByAuth returns true if metrics expose mode is set to "EnableWithAuth" +func IfSecureByAuth() bool { + return strings.EqualFold(GetMetricsExposeMode(), "EnableWithAuth") +} + +// IfSecureByTLS returns true if metrics expose mode is set to "EnableWithTLS" +func IfSecureByTLS() bool { + return 
strings.EqualFold(GetMetricsExposeMode(), "EnableWithTLS") +} + // GetUseCertManager returns true if cert-manager is used to setup the webhook's // TLS certs. func GetUseCertManager() bool {
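
For reference, a short sketch (not part of the patch itself) of how the new expose modes are exercised when deploying with helm; these commands mirror the kuttl test steps above, and NAMESPACE plus the secret name "custom-cert" are placeholders:

# Token-based authn/authz on the metrics endpoint (replacement for the old EnableWithAuthProxy mode)
DEPLOY_WITH=helm make deploy-operator NAMESPACE=$NAMESPACE HELM_OVERRIDES='--set prometheus.expose=EnableWithAuth'

# TLS client-certificate checking on the metrics endpoint; prometheus.tlsSecret must reference an existing secret
DEPLOY_WITH=helm make deploy-operator NAMESPACE=$NAMESPACE HELM_OVERRIDES='--set prometheus.expose=EnableWithTLS,prometheus.tlsSecret=custom-cert'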