From d5cf0364c69179b24cadefe45dac9f2e424843e4 Mon Sep 17 00:00:00 2001 From: Deepak Muley Date: Mon, 15 Jan 2024 09:32:09 -0800 Subject: [PATCH] CAPX clusterclass support (#344) * capx clusterclass support have kept the go/v3 kubebuilder scaffolding as is to reduce the code churn * updated yamls and their kustomize files. also review comments incorporated * made user-ca-bundle cluster specific and separated cluster and clusterclass * fixed typos * updated eviction policy and master merge template fixes * merged ccm related changes with the new yaml templates * added first jsonPatches in clusterclass template and cluster topology * added controlPlaneEndpoint variable in clusterclass * added pc endpoint details in patches and variables * fixed nct * added nmt patches for cp and md * reverted removal of ccm ca bundle config map and added test * fixed makefile and docs with correct commands * now using infrav1 instead of kubebuilder default for latest version * update the docs * e2e test fixes * removed sample file for nutanixclustertemplate * added cni-crs related patch * updated test to match the fields * separated dev test makefile commands into separate file this is done to improve maintainability * yaml location * updated name of cm to make the test pass This is required as currently ccm-update.yaml vars are not getting replaced through code. 
--- Makefile | 45 +- PROJECT | 12 + api/v1beta1/nutanixclustertemplate_types.go | 55 ++ api/v1beta1/zz_generated.deepcopy.go | 90 +++ clusterctl.yaml.tmpl | 3 + ...ster.x-k8s.io_nutanixclustertemplates.yaml | 247 ++++++ config/crd/kustomization.yaml | 3 + ...ainjection_in_nutanixclustertemplates.yaml | 7 + .../webhook_in_nutanixclustertemplates.yaml | 16 + config/manager/manager.yaml | 4 +- .../nutanixclustertemplate_editor_role.yaml | 25 + .../nutanixclustertemplate_viewer_role.yaml | 21 + docs/developer_workflow.md | 46 +- main.go | 4 +- scripts/ccm_nutanix_update.sh | 2 +- templates/{base => base-root}/cm.yaml | 3 +- templates/base-root/kustomization.yaml | 16 + .../{base => base-root}/nutanix-ccm-crs.yaml | 2 +- .../nutanix-ccm-secret.yaml | 0 .../{base => base-root}/nutanix-ccm.yaml | 4 +- templates/{base => base-root}/secret.yaml | 1 - templates/base/ccm-patch.yaml | 19 +- templates/base/cluster-without-topology.yaml | 21 + .../base/{cluster-with-kcp.yaml => kcp.yaml} | 78 +- templates/base/kct.yaml | 25 + templates/base/kustomization.yaml | 19 +- templates/base/md.yaml | 1 - templates/base/mhc.yaml | 1 - templates/base/nmt.yaml | 1 - templates/base/nutanix-cluster.yaml | 18 + templates/cluster-template-clusterclass.yaml | 765 ++++++++++++++++++ templates/cluster-template-csi.yaml | 13 +- templates/cluster-template-topology.yaml | 61 ++ templates/cluster-template.yaml | 13 +- templates/clusterclass/clusterclass.yaml | 304 +++++++ templates/clusterclass/kcpt.yaml | 111 +++ templates/clusterclass/kct.yaml | 19 + templates/clusterclass/kustomization.yaml | 13 + templates/clusterclass/nct.yaml | 8 + templates/clusterclass/nmt-cp.yaml | 38 + templates/clusterclass/nmt-md.yaml | 38 + templates/topology/ccm-patch.yaml | 6 + templates/topology/cluster-with-topology.yaml | 57 ++ templates/topology/kustomization.yaml | 10 + test-cluster-with-topology.mk | 43 + test-cluster-without-topology.mk | 36 + test/e2e/clusterclass_changes_test.go | 54 ++ 
test/e2e/config/nutanix.yaml | 5 + .../v1.2.4/base/cluster-with-kcp.yaml | 2 +- .../v1.2.4/base/cm.yaml | 2 +- .../v1alpha4/bases/cm.yaml | 2 +- .../v1beta1/base/cni-patch.yaml | 3 +- .../kustomization.yaml | 2 + .../failure-domain-patch.yaml | 3 - .../kustomization.yaml | 22 +- .../kustomization.yaml | 21 +- .../cluster-with-kcp.yaml | 1 - .../kustomization.yaml | 21 +- .../cluster-template-md-remediation/md.yaml | 1 - .../kustomization.yaml | 21 +- .../kustomization.yaml | 19 +- .../nc.yaml | 1 - .../kustomization.yaml | 13 +- .../kustomization.yaml | 21 +- .../v1beta1/cluster-template-project/nmt.yaml | 1 - .../kustomization.yaml | 6 + .../kustomization.yaml | 21 +- .../v1beta1/clusterclass-e2e.yaml | 765 ++++++++++++++++++ .../kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../no-kubeproxy-clusterclass.yaml | 22 + .../v1beta1/no-kubeproxy/no-kubeproxy.yaml | 2 - 72 files changed, 3123 insertions(+), 242 deletions(-) create mode 100644 api/v1beta1/nutanixclustertemplate_types.go create mode 100644 config/crd/bases/infrastructure.cluster.x-k8s.io_nutanixclustertemplates.yaml create mode 100644 config/crd/patches/cainjection_in_nutanixclustertemplates.yaml create mode 100644 config/crd/patches/webhook_in_nutanixclustertemplates.yaml create mode 100644 config/rbac/nutanixclustertemplate_editor_role.yaml create mode 100644 config/rbac/nutanixclustertemplate_viewer_role.yaml rename templates/{base => base-root}/cm.yaml (67%) create mode 100644 templates/base-root/kustomization.yaml rename templates/{base => base-root}/nutanix-ccm-crs.yaml (88%) rename templates/{base => base-root}/nutanix-ccm-secret.yaml (100%) rename templates/{base => base-root}/nutanix-ccm.yaml (97%) rename templates/{base => base-root}/secret.yaml (91%) create mode 100644 templates/base/cluster-without-topology.yaml rename templates/base/{cluster-with-kcp.yaml => kcp.yaml} (69%) create mode 100644 templates/base/kct.yaml create mode 100644 templates/base/nutanix-cluster.yaml create mode 
100644 templates/cluster-template-clusterclass.yaml create mode 100644 templates/cluster-template-topology.yaml create mode 100644 templates/clusterclass/clusterclass.yaml create mode 100644 templates/clusterclass/kcpt.yaml create mode 100644 templates/clusterclass/kct.yaml create mode 100644 templates/clusterclass/kustomization.yaml create mode 100644 templates/clusterclass/nct.yaml create mode 100644 templates/clusterclass/nmt-cp.yaml create mode 100644 templates/clusterclass/nmt-md.yaml create mode 100644 templates/topology/ccm-patch.yaml create mode 100644 templates/topology/cluster-with-topology.yaml create mode 100644 templates/topology/kustomization.yaml create mode 100644 test-cluster-with-topology.mk create mode 100644 test-cluster-without-topology.mk create mode 100644 test/e2e/clusterclass_changes_test.go create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-clusterclass/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-topology/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-clusterclass/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-failure-domains/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy-clusterclass.yaml diff --git a/Makefile b/Makefile index 1d64da7d3d..a1f30a9f14 100644 --- a/Makefile +++ b/Makefile @@ -181,9 +181,6 @@ USE_EXISTING_CLUSTER ?= false GINKGO_NOCOLOR ?= false FLAVOR ?= e2e -TEST_NAMESPACE=capx-test-ns -TEST_CLUSTER_NAME=mycluster - # set ginkgo focus flags, if any ifneq ($(strip $(GINKGO_FOCUS)),) _FOCUS_ARGS := $(foreach arg,$(strip $(GINKGO_FOCUS)),--focus="$(arg)") @@ -328,6 +325,9 @@ cluster-e2e-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1 $(KUSTOMIZE) 
build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-csi --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-csi.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-failure-domains --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-failure-domains.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-clusterclass --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-clusterclass.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-clusterclass --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/clusterclass-e2e.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-topology.yaml cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ##Generate cluster templates without kubeproxy # v1alpha4 @@ -346,10 +346,15 @@ cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ##Generate cluster templates wi $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-csi --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-csi.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-failure-domains --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-failure-domains.yaml + $(KUSTOMIZE) build 
$(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-clusterclass --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-clusterclass.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-clusterclass --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/clusterclass-e2e.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-topology.yaml cluster-templates: $(KUSTOMIZE) ## Generate cluster templates for all flavors $(KUSTOMIZE) build $(TEMPLATES_DIR)/base > $(TEMPLATES_DIR)/cluster-template.yaml $(KUSTOMIZE) build $(TEMPLATES_DIR)/csi > $(TEMPLATES_DIR)/cluster-template-csi.yaml + $(KUSTOMIZE) build $(TEMPLATES_DIR)/clusterclass > $(TEMPLATES_DIR)/cluster-template-clusterclass.yaml + $(KUSTOMIZE) build $(TEMPLATES_DIR)/topology > $(TEMPLATES_DIR)/cluster-template-topology.yaml ##@ Testing @@ -387,33 +392,6 @@ ifeq ($(EXPORT_RESULT), true) gocov convert profile.cov | gocov-xml > coverage.xml endif -.PHONY: test-clusterctl-create -test-clusterctl-create: $(CLUSTERCTL) ## Run the tests using clusterctl - $(CLUSTERCTL) version - $(CLUSTERCTL) config repositories | grep nutanix - $(CLUSTERCTL) generate cluster ${TEST_CLUSTER_NAME} -i nutanix:${LOCAL_PROVIDER_VERSION} --list-variables -v 10 - $(CLUSTERCTL) generate cluster ${TEST_CLUSTER_NAME} -i nutanix:${LOCAL_PROVIDER_VERSION} --target-namespace ${TEST_NAMESPACE} -v 10 > ./cluster.yaml - kubectl create ns $(TEST_NAMESPACE) || true - kubectl apply -f ./cluster.yaml -n $(TEST_NAMESPACE) - -.PHONY: test-clusterctl-delete -test-clusterctl-delete: ## Delete clusterctl created cluster - kubectl -n ${TEST_NAMESPACE} delete cluster ${TEST_CLUSTER_NAME} - -.PHONY: test-kubectl-bootstrap -test-kubectl-bootstrap: ## Run kubectl queries to get all capx management/bootstrap related objects - kubectl 
get ns - kubectl get all --all-namespaces - kubectl -n capx-system get all - kubectl -n $(TEST_NAMESPACE) get Cluster,NutanixCluster,Machine,NutanixMachine,KubeAdmControlPlane,MachineHealthCheck,nodes - kubectl -n capx-system get pod - -.PHONY: test-kubectl-workload -test-kubectl-workload: ## Run kubectl queries to get all capx workload related objects - kubectl -n $(TEST_NAMESPACE) get secret - kubectl -n ${TEST_NAMESPACE} get secret ${TEST_CLUSTER_NAME}-kubeconfig -o json | jq -r .data.value | base64 --decode > ${TEST_CLUSTER_NAME}.workload.kubeconfig - kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.workload.kubeconfig get nodes,ns - .PHONY: ginkgo-help ginkgo-help: $(GINKGO) help run @@ -631,3 +609,10 @@ verify-manifests: manifests ## Verify generated manifests are up to date clean: ## Clean the build and test artifacts rm -rf $(ARTIFACTS) $(BIN_DIR) +## -------------------------------------- +## Developer local tests +## -------------------------------------- + +##@ Test Dev Cluster with and without topology +include test-cluster-without-topology.mk +include test-cluster-with-topology.mk \ No newline at end of file diff --git a/PROJECT b/PROJECT index 37205faa85..91100f6f58 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,7 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html domain: cluster.x-k8s.io layout: - go.kubebuilder.io/v3 @@ -30,4 +34,12 @@ resources: kind: NutanixMachineTemplate path: github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1alpha4 version: v1alpha4 +- api: + crdVersion: v1 + namespaced: true + domain: cluster.x-k8s.io + group: infrastructure + kind: NutanixClusterTemplate + path: github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1 + version: v1beta1 version: "3" diff --git a/api/v1beta1/nutanixclustertemplate_types.go b/api/v1beta1/nutanixclustertemplate_types.go new file mode 100644 index 0000000000..b32812d538 --- /dev/null +++ b/api/v1beta1/nutanixclustertemplate_types.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 Nutanix + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NutanixClusterTemplateSpec defines the desired state of NutanixClusterTemplate +type NutanixClusterTemplateSpec struct { + Template NutanixClusterTemplateResource `json:"template"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:resource:categories=cluster-api + +// NutanixClusterTemplate is the Schema for the nutanixclustertemplates API +type NutanixClusterTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NutanixClusterTemplateSpec `json:"spec,omitempty"` +} + +//+kubebuilder:object:root=true + +// NutanixClusterTemplateList contains a list of NutanixClusterTemplate +type NutanixClusterTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NutanixClusterTemplate `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NutanixClusterTemplate{}, &NutanixClusterTemplateList{}) +} + +// NutanixClusterTemplateResource describes the data needed to create a NutanixCluster from a template. +type NutanixClusterTemplateResource struct { + Spec NutanixClusterSpec `json:"spec"` +} diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index ceeef4c9c5..e9e712fe78 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -170,6 +170,96 @@ func (in *NutanixClusterStatus) DeepCopy() *NutanixClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixClusterTemplate) DeepCopyInto(out *NutanixClusterTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixClusterTemplate. 
+func (in *NutanixClusterTemplate) DeepCopy() *NutanixClusterTemplate { + if in == nil { + return nil + } + out := new(NutanixClusterTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NutanixClusterTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixClusterTemplateList) DeepCopyInto(out *NutanixClusterTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NutanixClusterTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixClusterTemplateList. +func (in *NutanixClusterTemplateList) DeepCopy() *NutanixClusterTemplateList { + if in == nil { + return nil + } + out := new(NutanixClusterTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NutanixClusterTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixClusterTemplateResource) DeepCopyInto(out *NutanixClusterTemplateResource) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixClusterTemplateResource. 
+func (in *NutanixClusterTemplateResource) DeepCopy() *NutanixClusterTemplateResource { + if in == nil { + return nil + } + out := new(NutanixClusterTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixClusterTemplateSpec) DeepCopyInto(out *NutanixClusterTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixClusterTemplateSpec. +func (in *NutanixClusterTemplateSpec) DeepCopy() *NutanixClusterTemplateSpec { + if in == nil { + return nil + } + out := new(NutanixClusterTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NutanixFailureDomain) DeepCopyInto(out *NutanixFailureDomain) { *out = *in diff --git a/clusterctl.yaml.tmpl b/clusterctl.yaml.tmpl index 3345f8721b..1d4233018c 100644 --- a/clusterctl.yaml.tmpl +++ b/clusterctl.yaml.tmpl @@ -1,5 +1,6 @@ CLUSTERCTL_LOG_LEVEL: 10 EXP_CLUSTER_RESOURCE_SET: "true" +CLUSTER_TOPOLOGY: "true" NUTANIX_ENDPOINT: "" # IP or FQDN of Prism Central NUTANIX_USER: "" # Prism Central user @@ -17,6 +18,8 @@ WORKER_MACHINE_COUNT: 2 # Number of Machines in the worker node KUBEVIP_LB_ENABLE: "false" # See https://kube-vip.io/docs/about/architecture/?query=lb_enable#control-plane-load-balancing KUBEVIP_SVC_ENABLE: "false" # See https://kube-vip.io/docs/about/architecture/?query=lb_enable#kubernetes-service-load-balancing +CLUSTER_CLASS_NAME: "" # ClusterClass Name if any + providers: # add a custom provider - name: "nutanix" diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_nutanixclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_nutanixclustertemplates.yaml new file mode 100644 index 0000000000..c90478dcda --- /dev/null +++ 
b/config/crd/bases/infrastructure.cluster.x-k8s.io_nutanixclustertemplates.yaml @@ -0,0 +1,247 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: nutanixclustertemplates.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: NutanixClusterTemplate + listKind: NutanixClusterTemplateList + plural: nutanixclustertemplates + singular: nutanixclustertemplate + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: NutanixClusterTemplate is the Schema for the nutanixclustertemplates + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NutanixClusterTemplateSpec defines the desired state of NutanixClusterTemplate + properties: + template: + description: NutanixClusterTemplateResource describes the data needed + to create a NutanixCluster from a template. + properties: + spec: + description: NutanixClusterSpec defines the desired state of NutanixCluster + properties: + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint + used to communicate with the control plane. 
host can be + either DNS name or ip address + properties: + host: + description: The hostname on which the API server is serving. + type: string + port: + description: The port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + failureDomains: + description: failureDomains configures failure domains information + for the Nutanix platform. When set, the failure domains + defined here may be used to spread Machines across prism + element clusters to improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure domain + information for Nutanix. + properties: + cluster: + description: cluster is to identify the cluster (the + Prism Element under management of the Prism Central), + in which the Machine's VM will be created. The cluster + identifier (uuid or name) can be obtained from the + Prism Central console or using the prism_central API. + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use + for this resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in + the PC. + type: string + required: + - type + type: object + controlPlane: + description: indicates if a failure domain is suited + for control plane nodes + type: boolean + name: + description: name defines the unique name of a failure + domain. Name is required and must be at most 64 characters + in length. It must consist of only lower case alphanumeric + characters and hyphens (-). It must start and end + with an alphanumeric character. This value is arbitrary + and is used to identify the failure domain within + the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers (one + or more) of the cluster's network subnets for the + Machine's VM to connect to. 
The subnet identifiers + (uuid or name) can be obtained from the Prism Central + console or using the prism_central API. + items: + description: NutanixResourceIdentifier holds the identity + of a Nutanix PC resource (cluster, image, subnet, + etc.) + properties: + name: + description: name is the resource name in the + PC + type: string + type: + description: Type is the identifier type to use + for this resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource + in the PC. + type: string + required: + - type + type: object + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + prismCentral: + description: prismCentral holds the endpoint address and port + to access the Nutanix Prism Central. When a cluster-wide + proxy is installed, by default, this endpoint will be accessed + via the proxy. Should you wish for communication with this + endpoint not to be proxied, please add the endpoint to the + proxy spec.noProxy list. + properties: + additionalTrustBundle: + description: AdditionalTrustBundle is a PEM encoded x509 + cert for the RootCA that was used to create the certificate + for a Prism Central that uses certificates that were + issued by a non-publicly trusted RootCA. The trust bundle + is added to the cert pool used to authenticate the TLS + connection to the Prism Central. + properties: + data: + description: Data of the trust bundle if Kind is String. + type: string + kind: + description: Kind of the Nutanix trust bundle + enum: + - String + - ConfigMap + type: string + name: + description: Name of the credential. + type: string + namespace: + description: namespace of the credential. 
+ type: string + required: + - kind + type: object + address: + description: address is the endpoint address (DNS name + or IP address) of the Nutanix Prism Central or Element + (cluster) + maxLength: 256 + type: string + credentialRef: + description: Pass credential information for the target + Prism instance + properties: + kind: + description: Kind of the Nutanix credential + enum: + - Secret + type: string + name: + description: Name of the credential. + minLength: 1 + type: string + namespace: + description: namespace of the credential. + type: string + required: + - kind + - name + type: object + insecure: + default: false + description: use insecure connection to Prism endpoint + type: boolean + port: + default: 9440 + description: port is the port number to access the Nutanix + Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + type: object + required: + - spec + type: object + required: + - template + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index b40e9713d2..159be5c470 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,6 +5,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_nutanixclusters.yaml - bases/infrastructure.cluster.x-k8s.io_nutanixmachines.yaml - bases/infrastructure.cluster.x-k8s.io_nutanixmachinetemplates.yaml +- bases/infrastructure.cluster.x-k8s.io_nutanixclustertemplates.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -13,6 +14,7 @@ patchesStrategicMerge: #- patches/webhook_in_nutanixclusters.yaml #- patches/webhook_in_nutanixmachines.yaml #- patches/webhook_in_nutanixmachinetemplates.yaml +#- patches/webhook_in_nutanixclustertemplates.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] 
To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. @@ -20,6 +22,7 @@ patchesStrategicMerge: #- patches/cainjection_in_nutanixclusters.yaml #- patches/cainjection_in_nutanixmachines.yaml #- patches/cainjection_in_nutanixmachinetemplates.yaml +#- patches/cainjection_in_nutanixclustertemplates.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch patchesJson6902: diff --git a/config/crd/patches/cainjection_in_nutanixclustertemplates.yaml b/config/crd/patches/cainjection_in_nutanixclustertemplates.yaml new file mode 100644 index 0000000000..5d5016be1e --- /dev/null +++ b/config/crd/patches/cainjection_in_nutanixclustertemplates.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: nutanixclustertemplates.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_nutanixclustertemplates.yaml b/config/crd/patches/webhook_in_nutanixclustertemplates.yaml new file mode 100644 index 0000000000..e9116a04e9 --- /dev/null +++ b/config/crd/patches/webhook_in_nutanixclustertemplates.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: nutanixclustertemplates.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 7ad285e195..ac2b687e3c 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -22,7 +22,7 @@ data: }, "additionalTrustBundle": { "kind": "ConfigMap", - "name": "capx-user-ca-bundle" + "name": "capx-pc-trusted-ca-bundle" } } --- @@ -50,7 
+50,7 @@ stringData: apiVersion: v1 kind: ConfigMap metadata: - name: user-ca-bundle + name: pc-trusted-ca-bundle namespace: capi-nutanix-system binaryData: ca.crt: "${NUTANIX_ADDITIONAL_TRUST_BUNDLE=''}" diff --git a/config/rbac/nutanixclustertemplate_editor_role.yaml b/config/rbac/nutanixclustertemplate_editor_role.yaml new file mode 100644 index 0000000000..56d301aa5a --- /dev/null +++ b/config/rbac/nutanixclustertemplate_editor_role.yaml @@ -0,0 +1,25 @@ +# permissions for end users to edit nutanixclustertemplates. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: nutanixclustertemplate-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: cluster-api-provider-nutanix + app.kubernetes.io/part-of: cluster-api-provider-nutanix + app.kubernetes.io/managed-by: kustomize + name: nutanixclustertemplate-editor-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - nutanixclustertemplates + verbs: + - create + - delete + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/config/rbac/nutanixclustertemplate_viewer_role.yaml b/config/rbac/nutanixclustertemplate_viewer_role.yaml new file mode 100644 index 0000000000..89172ed1c5 --- /dev/null +++ b/config/rbac/nutanixclustertemplate_viewer_role.yaml @@ -0,0 +1,21 @@ +# permissions for end users to view nutanixclustertemplates. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: nutanixclustertemplate-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: cluster-api-provider-nutanix + app.kubernetes.io/part-of: cluster-api-provider-nutanix + app.kubernetes.io/managed-by: kustomize + name: nutanixclustertemplate-viewer-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - nutanixclustertemplates + verbs: + - get + - list + - watch diff --git a/docs/developer_workflow.md b/docs/developer_workflow.md index 6a24391f9d..3a686ca752 100644 --- a/docs/developer_workflow.md +++ b/docs/developer_workflow.md @@ -62,18 +62,18 @@ This will configure [kubectl](https://kubernetes.io/docs/reference/kubectl/) for kubectl get pods -n capx-system ``` -## Create a test workload cluster +## Create a test workload cluster without topology 1. Create a workload cluster: ```shell - make test-clusterctl-create + make test-cluster-create ``` Optionally, to use a unique cluster name: ```shell - make test-clusterctl-create TEST_CLUSTER_NAME=<> + make test-cluster-create TEST_CLUSTER_NAME=<> ``` 1. Get the workload cluster kubeconfig. This will write out the kubeconfig file in the local directory as `.workload.kubeconfig`: @@ -88,6 +88,29 @@ This will configure [kubectl](https://kubernetes.io/docs/reference/kubectl/) for make test-kubectl-workload TEST_CLUSTER_NAME=<> ``` +## Create a test workload cluster with topology + +1. Create a workload cluster: + + ```shell + make test-cc-cluster-create + ``` + + Optionally, to use a unique cluster name: + + ```shell + make test-cc-cluster-create TEST_TOPOLOGY_CLUSTER_NAME=<> + ``` + +## Upgrade test workload cluster's k8s version + +1. Upgrade workload cluster's k8s version + + ```shell + make test-cc-cluster-upgrade TEST_TOPOLOGY_CLUSTER_NAME=<> UPGRADE_K8S_VERSION_TO= + ``` + + ## Debugging failures 1. 
Check the cluster resources: @@ -123,16 +146,27 @@ This will configure [kubectl](https://kubernetes.io/docs/reference/kubectl/) for ## Cleanup -1. Delete the test workload cluster: +1. Delete the test workload cluster without topology: + + ```shell + make test-cluster-delete + ``` + + When using a unique cluster name set `TEST_CLUSTER_NAME` variable: + + ```shell + make test-cluster-delete TEST_CLUSTER_NAME=<> + +1. Delete the test workload cluster with topology: ```shell - make test-clusterctl-delete + make test-cc-cluster-delete ``` When using a unique cluster name set `TEST_CLUSTER_NAME` variable: ```shell - make test-clusterctl-delete TEST_CLUSTER_NAME=<> + make test-cc-cluster-delete TEST_TOPOLOGY_CLUSTER_NAME=<> 1. Delete the management KIND cluster: diff --git a/main.go b/main.go index 17a87af985..1b2cf4bb44 100644 --- a/main.go +++ b/main.go @@ -45,7 +45,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" infrav1alpha4 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1alpha4" - infrav1beta1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1" + infrav1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1" "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/controllers" //+kubebuilder:scaffold:imports ) @@ -66,7 +66,7 @@ func init() { utilruntime.Must(bootstrapv1.AddToScheme(scheme)) utilruntime.Must(infrav1alpha4.AddToScheme(scheme)) - utilruntime.Must(infrav1beta1.AddToScheme(scheme)) + utilruntime.Must(infrav1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff --git a/scripts/ccm_nutanix_update.sh b/scripts/ccm_nutanix_update.sh index edb1b490c5..69030b343f 100755 --- a/scripts/ccm_nutanix_update.sh +++ b/scripts/ccm_nutanix_update.sh @@ -22,4 +22,4 @@ helm template -n kube-system nutanix-cloud-provider nutanix/nutanix-cloud-provid --set prismCentralEndPoint='${NUTANIX_ENDPOINT}',prismCentralPort='${NUTANIX_PORT=9440}',prismCentralInsecure='${NUTANIX_INSECURE=false}' \ --set 
image.repository="\${CCM_REPO=$NUTANIX_CCM_REPO}",image.tag="\${CCM_TAG=v$NUTANIX_CCM_VERSION}" \ --set createSecret=false \ - > templates/base/nutanix-ccm.yaml + > templates/base-root/nutanix-ccm.yaml diff --git a/templates/base/cm.yaml b/templates/base-root/cm.yaml similarity index 67% rename from templates/base/cm.yaml rename to templates/base-root/cm.yaml index ff4d33af76..f95f785633 100644 --- a/templates/base/cm.yaml +++ b/templates/base-root/cm.yaml @@ -2,7 +2,6 @@ apiVersion: v1 kind: ConfigMap metadata: - name: user-ca-bundle - namespace: "${NAMESPACE}" + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle binaryData: ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} diff --git a/templates/base-root/kustomization.yaml b/templates/base-root/kustomization.yaml new file mode 100644 index 0000000000..9ee5d03569 --- /dev/null +++ b/templates/base-root/kustomization.yaml @@ -0,0 +1,16 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: "${NAMESPACE}" + +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - nutanix-ccm.yaml + +bases: + - ./secret.yaml + - ./cm.yaml + - ./nutanix-ccm-crs.yaml + - ./nutanix-ccm-secret.yaml diff --git a/templates/base/nutanix-ccm-crs.yaml b/templates/base-root/nutanix-ccm-crs.yaml similarity index 88% rename from templates/base/nutanix-ccm-crs.yaml rename to templates/base-root/nutanix-ccm-crs.yaml index 188d39347c..ede23956fa 100644 --- a/templates/base/nutanix-ccm-crs.yaml +++ b/templates/base-root/nutanix-ccm-crs.yaml @@ -12,7 +12,7 @@ spec: - kind: Secret name: nutanix-ccm-secret - kind: ConfigMap - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle strategy: ApplyOnce --- apiVersion: v1 diff --git a/templates/base/nutanix-ccm-secret.yaml b/templates/base-root/nutanix-ccm-secret.yaml similarity index 100% rename from templates/base/nutanix-ccm-secret.yaml rename to templates/base-root/nutanix-ccm-secret.yaml diff --git a/templates/base/nutanix-ccm.yaml 
b/templates/base-root/nutanix-ccm.yaml similarity index 97% rename from templates/base/nutanix-ccm.yaml rename to templates/base-root/nutanix-ccm.yaml index 2ef443ae5d..57e025a0b8 100644 --- a/templates/base/nutanix-ccm.yaml +++ b/templates/base-root/nutanix-ccm.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle namespace: kube-system binaryData: ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} @@ -34,7 +34,7 @@ data: }, "additionalTrustBundle": { "kind": "ConfigMap", - "name": "user-ca-bundle", + "name": "${CLUSTER_NAME}-pc-trusted-ca-bundle", "namespace": "kube-system" } }, diff --git a/templates/base/secret.yaml b/templates/base-root/secret.yaml similarity index 91% rename from templates/base/secret.yaml rename to templates/base-root/secret.yaml index 89771a709d..5e52c4c36e 100644 --- a/templates/base/secret.yaml +++ b/templates/base-root/secret.yaml @@ -3,7 +3,6 @@ apiVersion: v1 kind: Secret metadata: name: "${CLUSTER_NAME}" - namespace: "${NAMESPACE}" stringData: credentials: | [ diff --git a/templates/base/ccm-patch.yaml b/templates/base/ccm-patch.yaml index 790de17905..4a7de496e9 100644 --- a/templates/base/ccm-patch.yaml +++ b/templates/base/ccm-patch.yaml @@ -2,7 +2,6 @@ apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlane metadata: name: "${CLUSTER_NAME}-kcp" - namespace: "${NAMESPACE}" spec: kubeadmConfigSpec: clusterConfiguration: @@ -21,23 +20,21 @@ spec: kubeletExtraArgs: cloud-provider: external --- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + ccm: "nutanix" + name: "${CLUSTER_NAME}" +--- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate metadata: name: "${CLUSTER_NAME}-kcfg-0" - namespace: "${NAMESPACE}" spec: template: spec: joinConfiguration: nodeRegistration: kubeletExtraArgs: - cloud-provider: external ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - labels: - ccm: 
"nutanix" - name: "${CLUSTER_NAME}" - namespace: "${NAMESPACE}" + cloud-provider: external \ No newline at end of file diff --git a/templates/base/cluster-without-topology.yaml b/templates/base/cluster-without-topology.yaml new file mode 100644 index 0000000000..58f8690c47 --- /dev/null +++ b/templates/base/cluster-without-topology.yaml @@ -0,0 +1,21 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + services: + cidrBlocks: ["172.19.0.0/16"] + pods: + cidrBlocks: ["172.20.0.0/16"] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-kcp" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "${CLUSTER_NAME}" \ No newline at end of file diff --git a/templates/base/cluster-with-kcp.yaml b/templates/base/kcp.yaml similarity index 69% rename from templates/base/cluster-with-kcp.yaml rename to templates/base/kcp.yaml index f6ba2dc11b..9b1f38168d 100644 --- a/templates/base/cluster-with-kcp.yaml +++ b/templates/base/kcp.yaml @@ -1,52 +1,7 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: NutanixCluster -metadata: - name: "${CLUSTER_NAME}" - namespace: "${NAMESPACE}" -spec: - prismCentral: - address: "${NUTANIX_ENDPOINT}" - port: ${NUTANIX_PORT=9440} - insecure: ${NUTANIX_INSECURE=false} - credentialRef: - name: "${CLUSTER_NAME}" - kind: Secret - additionalTrustBundle: - name: user-ca-bundle - kind: ConfigMap - controlPlaneEndpoint: - host: "${CONTROL_PLANE_ENDPOINT_IP}" - port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - labels: - cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" - name: "${CLUSTER_NAME}" - namespace: "${NAMESPACE}" -spec: - clusterNetwork: - services: - cidrBlocks: ["172.19.0.0/16"] - pods: - 
cidrBlocks: ["172.20.0.0/16"] - serviceDomain: "cluster.local" - controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 - kind: KubeadmControlPlane - name: "${CLUSTER_NAME}-kcp" - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: NutanixCluster - name: "${CLUSTER_NAME}" - ---- apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlane metadata: name: "${CLUSTER_NAME}-kcp" - namespace: "${NAMESPACE}" spec: replicas: ${CONTROL_PLANE_MACHINE_COUNT=1} version: ${KUBERNETES_VERSION} @@ -166,35 +121,4 @@ spec: - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc - echo "after kubeadm call" > /var/log/postkubeadm.log useExperimentalRetryJoin: true - verbosity: 10 - ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 -kind: KubeadmConfigTemplate -metadata: - name: "${CLUSTER_NAME}-kcfg-0" - namespace: "${NAMESPACE}" -spec: - template: - spec: - joinConfiguration: - nodeRegistration: - kubeletExtraArgs: - # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd - # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 - #cgroup-driver: cgroupfs - eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% - tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" - users: - - name: capiuser - lockPassword: false - sudo: ALL=(ALL) NOPASSWD:ALL - sshAuthorizedKeys: - - ${NUTANIX_SSH_AUTHORIZED_KEY} - preKubeadmCommands: - - echo "before kubeadm call" > /var/log/prekubeadm.log - - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" - postKubeadmCommands: - - echo "after kubeadm call" > /var/log/postkubeadm.log - verbosity: 10 - 
#useExperimentalRetryJoin: true + verbosity: 10 \ No newline at end of file diff --git a/templates/base/kct.yaml b/templates/base/kct.yaml new file mode 100644 index 0000000000..ca472c987b --- /dev/null +++ b/templates/base/kct.yaml @@ -0,0 +1,25 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-kcfg-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + postKubeadmCommands: + - echo "after kubeadm call" > /var/log/postkubeadm.log + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + users: + - lockPassword: false + name: capiuser + sshAuthorizedKeys: + - ${NUTANIX_SSH_AUTHORIZED_KEY} + sudo: ALL=(ALL) NOPASSWD:ALL + verbosity: 10 diff --git a/templates/base/kustomization.yaml b/templates/base/kustomization.yaml index b4aa3b1508..384fb80031 100644 --- a/templates/base/kustomization.yaml +++ b/templates/base/kustomization.yaml @@ -1,22 +1,17 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -configMapGenerator: -- name: nutanix-ccm - behavior: merge - files: - - nutanix-ccm.yaml +namespace: "${NAMESPACE}" bases: - - ./cluster-with-kcp.yaml - - ./secret.yaml - - ./cm.yaml + - ../base-root + - ./nutanix-cluster.yaml + - ./cluster-without-topology.yaml + - ./kcp.yaml + - ./kct.yaml - ./nmt.yaml - ./md.yaml - ./mhc.yaml - - ./nutanix-ccm-crs.yaml - - ./nutanix-ccm-secret.yaml patchesStrategicMerge: -- ccm-patch.yaml - +- 
./ccm-patch.yaml \ No newline at end of file diff --git a/templates/base/md.yaml b/templates/base/md.yaml index b5efd60dab..a140c13dee 100644 --- a/templates/base/md.yaml +++ b/templates/base/md.yaml @@ -4,7 +4,6 @@ metadata: labels: cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} name: "${CLUSTER_NAME}-wmd" - namespace: "${NAMESPACE}" spec: clusterName: "${CLUSTER_NAME}" replicas: ${WORKER_MACHINE_COUNT} diff --git a/templates/base/mhc.yaml b/templates/base/mhc.yaml index 7c6077e84a..479101b284 100644 --- a/templates/base/mhc.yaml +++ b/templates/base/mhc.yaml @@ -2,7 +2,6 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineHealthCheck metadata: name: "${CLUSTER_NAME}-mhc" - namespace: "${NAMESPACE}" spec: clusterName: "${CLUSTER_NAME}" maxUnhealthy: 40% diff --git a/templates/base/nmt.yaml b/templates/base/nmt.yaml index 90829f2a76..ac9e27adbc 100644 --- a/templates/base/nmt.yaml +++ b/templates/base/nmt.yaml @@ -3,7 +3,6 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: NutanixMachineTemplate metadata: name: "${CLUSTER_NAME}-mt-0" - namespace: "${NAMESPACE}" spec: template: spec: diff --git a/templates/base/nutanix-cluster.yaml b/templates/base/nutanix-cluster.yaml new file mode 100644 index 0000000000..d57784565c --- /dev/null +++ b/templates/base/nutanix-cluster.yaml @@ -0,0 +1,18 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + prismCentral: + address: "${NUTANIX_ENDPOINT}" + port: ${NUTANIX_PORT=9440} + insecure: ${NUTANIX_INSECURE=false} + credentialRef: + name: "${CLUSTER_NAME}" + kind: Secret + additionalTrustBundle: + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + kind: ConfigMap + controlPlaneEndpoint: + host: "${CONTROL_PLANE_ENDPOINT_IP}" + port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} \ No newline at end of file diff --git a/templates/cluster-template-clusterclass.yaml b/templates/cluster-template-clusterclass.yaml new file mode 100644 index 0000000000..8241698d8e --- 
/dev/null +++ b/templates/cluster-template-clusterclass.yaml @@ -0,0 +1,765 @@ +apiVersion: v1 +binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} +kind: ConfigMap +metadata: + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + namespace: ${NAMESPACE} +--- +apiVersion: v1 +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + namespace: kube-system + binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cm.yaml + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "${NUTANIX_ENDPOINT}", + "port": ${NUTANIX_PORT=9440}, + "insecure": ${NUTANIX_INSECURE=false}, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + }, + "additionalTrustBundle": { + "kind": "ConfigMap", + "name": "${CLUSTER_NAME}-pc-trusted-ca-bundle", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": ${CCM_CUSTOM_LABEL=false}, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + 
resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cloud-provider-nutanix-deployment.yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: 
node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.1}" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +kind: ConfigMap +metadata: + name: nutanix-ccm + namespace: ${NAMESPACE} +--- +apiVersion: v1 +kind: Secret +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +stringData: + credentials: "[\n {\n \"type\": \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n + \ \"username\": \"${NUTANIX_USER}\", \n \"password\": \"${NUTANIX_PASSWORD}\"\n + \ }\n }\n }\n]\n" +--- +apiVersion: v1 +kind: Secret +metadata: + name: nutanix-ccm-secret + namespace: ${NAMESPACE} +stringData: + nutanix-ccm-secret.yaml: "apiVersion: v1\nkind: Secret\nmetadata:\n name: nutanix-creds\n + \ namespace: kube-system\nstringData:\n credentials: |\n [\n {\n \"type\": + \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n \"username\": + \"${NUTANIX_USER}\",\n \"password\": \"${NUTANIX_PASSWORD}\"\n },\n + \ \"prismElements\": null\n }\n }\n ]\n" +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: nutanix-ccm-crs + namespace: ${NAMESPACE} +spec: + clusterSelector: + matchLabels: + ccm: nutanix + resources: + - kind: ConfigMap + name: nutanix-ccm + - kind: Secret + name: nutanix-ccm-secret + - kind: ConfigMap + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + strategy: ApplyOnce +--- 
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-kcfg-0 + namespace: ${NAMESPACE} +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + postKubeadmCommands: + - echo "after kubeadm call" > /var/log/postkubeadm.log + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + verbosity: 10 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} + namespace: ${NAMESPACE} +spec: + controlPlane: + machineHealthCheck: + maxUnhealthy: 40% + nodeStartupTimeout: 10m + unhealthyConditions: + - status: "False" + timeout: 300s + type: Ready + - status: Unknown + timeout: 300s + type: Ready + - status: "True" + timeout: 300s + type: MemoryPressure + - status: "True" + timeout: 300s + type: DiskPressure + - status: "True" + timeout: 300s + type: PIDPressure + - status: "True" + timeout: 300s + type: NetworkUnavailable + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: ${CLUSTER_CLASS_NAME}-cp-nmt + namespace: ${NAMESPACE} + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: ${CLUSTER_CLASS_NAME}-kcpt + namespace: ${NAMESPACE} + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + name: ${CLUSTER_CLASS_NAME}-nct + namespace: ${NAMESPACE} + 
patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/users + valueFrom: + template: | + - name: capxuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - '{{ .sshKey }}' + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/users + valueFrom: + template: | + - name: capxuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - '{{ .sshKey }}' + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: add-ssh-user + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/controlPlaneEndpoint + valueFrom: + template: | + host: '{{ .controlPlaneEndpoint.IP }}' + port: {{ .controlPlaneEndpoint.port }} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + matchResources: + infrastructureCluster: true + name: update-control-plane-endpoint + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/prismCentral + valueFrom: + template: | + address: '{{ .prismCentralEndpoint.address }}' + port: {{ .prismCentralEndpoint.port }} + insecure: {{ .prismCentralEndpoint.insecure }} + credentialRef: + name: "${CLUSTER_NAME}" + kind: Secret + additionalTrustBundle: + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + kind: ConfigMap + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + matchResources: + infrastructureCluster: true + name: add-pc-endpoint-and-creds + - definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/bootType + valueFrom: + variable: controlPlaneMachineDetails.bootType + - op: replace + path: /spec/template/spec/vcpusPerSocket + valueFrom: + variable: 
controlPlaneMachineDetails.vcpusPerSocket + - op: replace + path: /spec/template/spec/memorySize + valueFrom: + variable: controlPlaneMachineDetails.memorySize + - op: replace + path: /spec/template/spec/systemDiskSize + valueFrom: + variable: controlPlaneMachineDetails.systemDiskSize + - op: replace + path: /spec/template/spec/image/name + valueFrom: + variable: controlPlaneMachineDetails.imageName + - op: replace + path: /spec/template/spec/cluster/name + valueFrom: + variable: controlPlaneMachineDetails.clusterName + - op: replace + path: /spec/template/spec/subnet + valueFrom: + template: | + - type: name + name: {{ .controlPlaneMachineDetails.subnetName }} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + matchResources: + controlPlane: true + name: update-control-plane-machine-template + - definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/bootType + valueFrom: + variable: workerMachineDetails.bootType + - op: replace + path: /spec/template/spec/vcpusPerSocket + valueFrom: + variable: workerMachineDetails.vcpusPerSocket + - op: replace + path: /spec/template/spec/memorySize + valueFrom: + variable: workerMachineDetails.memorySize + - op: replace + path: /spec/template/spec/systemDiskSize + valueFrom: + variable: workerMachineDetails.systemDiskSize + - op: replace + path: /spec/template/spec/image/name + valueFrom: + variable: workerMachineDetails.imageName + - op: replace + path: /spec/template/spec/cluster/name + valueFrom: + variable: workerMachineDetails.clusterName + - op: replace + path: /spec/template/spec/subnet + valueFrom: + template: | + - type: name + name: {{ .workerMachineDetails.subnetName }} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: update-worker-machine-template + variables: + - name: sshKey + required: true + schema: + 
openAPIV3Schema: + description: Public key to SSH onto the cluster nodes. + type: string + - name: controlPlaneEndpoint + required: true + schema: + openAPIV3Schema: + properties: + IP: + type: string + port: + type: integer + type: object + - name: prismCentralEndpoint + required: true + schema: + openAPIV3Schema: + properties: + address: + type: string + insecure: + type: boolean + port: + type: integer + type: object + - name: controlPlaneMachineDetails + required: true + schema: + openAPIV3Schema: + properties: + bootType: + type: string + clusterName: + type: string + imageName: + type: string + memorySize: + type: string + subnetName: + type: string + systemDiskSize: + type: string + vcpuSockets: + type: integer + vcpusPerSocket: + type: integer + type: object + - name: workerMachineDetails + required: true + schema: + openAPIV3Schema: + properties: + bootType: + type: string + clusterName: + type: string + imageName: + type: string + memorySize: + type: string + subnetName: + type: string + systemDiskSize: + type: string + vcpuSockets: + type: integer + vcpusPerSocket: + type: integer + type: object + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + machineHealthCheck: + maxUnhealthy: 40% + nodeStartupTimeout: 10m + unhealthyConditions: + - status: "False" + timeout: 300s + type: Ready + - status: Unknown + timeout: 300s + type: Ready + - status: "True" + timeout: 300s + type: MemoryPressure + - status: "True" + timeout: 300s + type: DiskPressure + - status: "True" + timeout: 300s + type: PIDPressure + - status: "True" + timeout: 300s + type: NetworkUnavailable + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-kcfg-0 + namespace: ${NAMESPACE} + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: ${CLUSTER_CLASS_NAME}-md-nmt + namespace: ${NAMESPACE} +--- +apiVersion: 
controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-kcpt + namespace: ${NAMESPACE} +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + scheduler: + extraArgs: + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "${CONTROL_PLANE_ENDPOINT_IP}" + - name: port + value: "${CONTROL_PLANE_ENDPOINT_PORT=6443}" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - 
name: vip_retryperiod + value: "2" + - name: svc_enable + value: "${KUBEVIP_SVC_ENABLE=false}" + - name: lb_enable + value: "${KUBEVIP_LB_ENABLE=false}" + - name: enableServicesElection + value: "${KUBEVIP_SVC_ELECTION=false}" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + - echo "after kubeadm call" > /var/log/postkubeadm.log + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + useExperimentalRetryJoin: true + 
verbosity: 10 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixClusterTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-nct + namespace: ${NAMESPACE} +spec: + template: + spec: + failureDomains: [] +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-cp-nmt + namespace: ${NAMESPACE} +spec: + template: + spec: + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + cluster: + name: ${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME} + type: name + image: + name: ${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME} + type: name + memorySize: ${NUTANIX_MACHINE_MEMORY_SIZE=4Gi} + providerID: nutanix://${CLUSTER_NAME}-m1 + subnet: + - name: ${NUTANIX_SUBNET_NAME} + type: name + systemDiskSize: ${NUTANIX_SYSTEMDISK_SIZE=40Gi} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-md-nmt + namespace: ${NAMESPACE} +spec: + template: + spec: + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + cluster: + name: ${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME} + type: name + image: + name: ${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME} + type: name + memorySize: ${NUTANIX_MACHINE_MEMORY_SIZE=4Gi} + providerID: nutanix://${CLUSTER_NAME}-m1 + subnet: + - name: ${NUTANIX_SUBNET_NAME} + type: name + systemDiskSize: ${NUTANIX_SYSTEMDISK_SIZE=40Gi} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} diff --git a/templates/cluster-template-csi.yaml b/templates/cluster-template-csi.yaml index 6163807ccc..12c0a7653e 100644 --- a/templates/cluster-template-csi.yaml +++ b/templates/cluster-template-csi.yaml @@ -3,7 +3,7 @@ binaryData: ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} kind: ConfigMap metadata: - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle namespace: ${NAMESPACE} --- apiVersion: v1 @@ 
-13,7 +13,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle namespace: kube-system binaryData: ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} @@ -45,7 +45,7 @@ data: }, "additionalTrustBundle": { "kind": "ConfigMap", - "name": "user-ca-bundle", + "name": "${CLUSTER_NAME}-pc-trusted-ca-bundle", "namespace": "kube-system" } }, @@ -223,6 +223,7 @@ data: kind: ConfigMap metadata: name: nutanix-ccm + namespace: ${NAMESPACE} --- apiVersion: v1 data: @@ -1557,6 +1558,7 @@ apiVersion: v1 kind: Secret metadata: name: nutanix-ccm-secret + namespace: ${NAMESPACE} stringData: nutanix-ccm-secret.yaml: "apiVersion: v1\nkind: Secret\nmetadata:\n name: nutanix-creds\n \ namespace: kube-system\nstringData:\n credentials: |\n [\n {\n \"type\": @@ -1569,6 +1571,7 @@ apiVersion: addons.cluster.x-k8s.io/v1beta1 kind: ClusterResourceSet metadata: name: nutanix-ccm-crs + namespace: ${NAMESPACE} spec: clusterSelector: matchLabels: @@ -1579,7 +1582,7 @@ spec: - kind: Secret name: nutanix-ccm-secret - kind: ConfigMap - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle strategy: ApplyOnce --- apiVersion: addons.cluster.x-k8s.io/v1beta1 @@ -1846,7 +1849,7 @@ spec: prismCentral: additionalTrustBundle: kind: ConfigMap - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle address: ${NUTANIX_ENDPOINT} credentialRef: kind: Secret diff --git a/templates/cluster-template-topology.yaml b/templates/cluster-template-topology.yaml new file mode 100644 index 0000000000..c20c6b2189 --- /dev/null +++ b/templates/cluster-template-topology.yaml @@ -0,0 +1,61 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + ccm: nutanix + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + clusterNetwork: + pods: + cidrBlocks: + - 172.20.0.0/16 + serviceDomain: cluster.local + services: + cidrBlocks: + - 172.19.0.0/16 + topology: + class: 
${CLUSTER_CLASS_NAME} + controlPlane: + metadata: {} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + variables: + - name: sshKey + value: ${NUTANIX_SSH_AUTHORIZED_KEY} + - name: controlPlaneEndpoint + value: + IP: ${CONTROL_PLANE_ENDPOINT_IP} + port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} + - name: prismCentralEndpoint + value: + address: ${NUTANIX_ENDPOINT} + insecure: ${NUTANIX_INSECURE=false} + port: ${NUTANIX_PORT=9440} + - name: controlPlaneMachineDetails + value: + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + clusterName: ${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME} + imageName: ${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME} + memorySize: ${NUTANIX_MACHINE_MEMORY_SIZE=4Gi} + subnetName: ${NUTANIX_SUBNET_NAME} + systemDiskSize: ${NUTANIX_SYSTEMDISK_SIZE=40Gi} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} + - name: workerMachineDetails + value: + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + clusterName: ${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME} + imageName: ${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME} + memorySize: ${NUTANIX_MACHINE_MEMORY_SIZE=4Gi} + subnetName: ${NUTANIX_SUBNET_NAME} + systemDiskSize: ${NUTANIX_SYSTEMDISK_SIZE=40Gi} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} + version: ${KUBERNETES_VERSION} + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + metadata: {} + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index c83529564d..94db15e18e 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -3,7 +3,7 @@ binaryData: ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} kind: ConfigMap metadata: - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle namespace: ${NAMESPACE} --- apiVersion: v1 @@ -13,7 +13,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle 
namespace: kube-system binaryData: ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} @@ -45,7 +45,7 @@ data: }, "additionalTrustBundle": { "kind": "ConfigMap", - "name": "user-ca-bundle", + "name": "${CLUSTER_NAME}-pc-trusted-ca-bundle", "namespace": "kube-system" } }, @@ -223,6 +223,7 @@ data: kind: ConfigMap metadata: name: nutanix-ccm + namespace: ${NAMESPACE} --- apiVersion: v1 kind: Secret @@ -238,6 +239,7 @@ apiVersion: v1 kind: Secret metadata: name: nutanix-ccm-secret + namespace: ${NAMESPACE} stringData: nutanix-ccm-secret.yaml: "apiVersion: v1\nkind: Secret\nmetadata:\n name: nutanix-creds\n \ namespace: kube-system\nstringData:\n credentials: |\n [\n {\n \"type\": @@ -250,6 +252,7 @@ apiVersion: addons.cluster.x-k8s.io/v1beta1 kind: ClusterResourceSet metadata: name: nutanix-ccm-crs + namespace: ${NAMESPACE} spec: clusterSelector: matchLabels: @@ -260,7 +263,7 @@ spec: - kind: Secret name: nutanix-ccm-secret - kind: ConfigMap - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle strategy: ApplyOnce --- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 @@ -513,7 +516,7 @@ spec: prismCentral: additionalTrustBundle: kind: ConfigMap - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle address: ${NUTANIX_ENDPOINT} credentialRef: kind: Secret diff --git a/templates/clusterclass/clusterclass.yaml b/templates/clusterclass/clusterclass.yaml new file mode 100644 index 0000000000..93aa08fb55 --- /dev/null +++ b/templates/clusterclass/clusterclass.yaml @@ -0,0 +1,304 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: "${CLUSTER_CLASS_NAME}" +spec: + controlPlane: + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: ${CLUSTER_CLASS_NAME}-kcpt + namespace: ${NAMESPACE} + machineInfrastructure: + ref: + kind: NutanixMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: ${CLUSTER_CLASS_NAME}-cp-nmt + namespace: ${NAMESPACE} + machineHealthCheck: + 
maxUnhealthy: 40% + nodeStartupTimeout: 10m + unhealthyConditions: + - type: Ready + status: "False" + timeout: 300s + - type: Ready + status: Unknown + timeout: 300s + - type: MemoryPressure + status: "True" + timeout: 300s + - type: DiskPressure + status: "True" + timeout: 300s + - type: PIDPressure + status: "True" + timeout: 300s + - type: NetworkUnavailable + status: "True" + timeout: 300s + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "${CLUSTER_NAME}-kcfg-0" + namespace: ${NAMESPACE} + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: ${CLUSTER_CLASS_NAME}-md-nmt + namespace: ${NAMESPACE} + machineHealthCheck: + maxUnhealthy: 40% + nodeStartupTimeout: 10m + unhealthyConditions: + - type: Ready + status: "False" + timeout: 300s + - type: Ready + status: Unknown + timeout: 300s + - type: MemoryPressure + status: "True" + timeout: 300s + - type: DiskPressure + status: "True" + timeout: 300s + - type: PIDPressure + status: "True" + timeout: 300s + - type: NetworkUnavailable + status: "True" + timeout: 300s + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + name: ${CLUSTER_CLASS_NAME}-nct + namespace: ${NAMESPACE} + patches: + - name: add-ssh-user + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/users + valueFrom: + template: | + - name: capxuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - '{{ .sshKey }}' + - selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + 
jsonPatches: + - op: add + path: /spec/template/spec/users + valueFrom: + template: | + - name: capxuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - '{{ .sshKey }}' + - name: update-control-plane-endpoint + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + matchResources: + infrastructureCluster: true + jsonPatches: + - op: add + path: /spec/template/spec/controlPlaneEndpoint + valueFrom: + template: | + host: '{{ .controlPlaneEndpoint.IP }}' + port: {{ .controlPlaneEndpoint.port }} + - name: add-pc-endpoint-and-creds + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + matchResources: + infrastructureCluster: true + jsonPatches: + - op: add + path: /spec/template/spec/prismCentral + valueFrom: + template: | + address: '{{ .prismCentralEndpoint.address }}' + port: {{ .prismCentralEndpoint.port }} + insecure: {{ .prismCentralEndpoint.insecure }} + credentialRef: + name: "${CLUSTER_NAME}" + kind: Secret + additionalTrustBundle: + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + kind: ConfigMap + - name: update-control-plane-machine-template + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: replace + path: /spec/template/spec/bootType + valueFrom: + variable: controlPlaneMachineDetails.bootType + - op: replace + path: /spec/template/spec/vcpusPerSocket + valueFrom: + variable: controlPlaneMachineDetails.vcpusPerSocket + - op: replace + path: /spec/template/spec/memorySize + valueFrom: + variable: controlPlaneMachineDetails.memorySize + - op: replace + path: /spec/template/spec/systemDiskSize + valueFrom: + variable: controlPlaneMachineDetails.systemDiskSize + - op: replace + path: /spec/template/spec/image/name + valueFrom: + variable: controlPlaneMachineDetails.imageName + - op: replace + 
path: /spec/template/spec/cluster/name
+            valueFrom:
+              variable: controlPlaneMachineDetails.clusterName
+          - op: replace
+            path: /spec/template/spec/subnet
+            valueFrom:
+              template: |
+                - type: name
+                  name: {{ .controlPlaneMachineDetails.subnetName }}
+    - name: update-worker-machine-template
+      definitions:
+        - selector:
+            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+            kind: NutanixMachineTemplate
+            matchResources:
+              machineDeploymentClass:
+                names:
+                  - ${CLUSTER_CLASS_NAME}-worker
+          jsonPatches:
+          - op: replace
+            path: /spec/template/spec/bootType
+            valueFrom:
+              variable: workerMachineDetails.bootType
+          - op: replace
+            path: /spec/template/spec/vcpusPerSocket
+            valueFrom:
+              variable: workerMachineDetails.vcpusPerSocket
+          - op: replace
+            path: /spec/template/spec/memorySize
+            valueFrom:
+              variable: workerMachineDetails.memorySize
+          - op: replace
+            path: /spec/template/spec/systemDiskSize
+            valueFrom:
+              variable: workerMachineDetails.systemDiskSize
+          - op: replace
+            path: /spec/template/spec/image/name
+            valueFrom:
+              variable: workerMachineDetails.imageName
+          - op: replace
+            path: /spec/template/spec/cluster/name
+            valueFrom:
+              variable: workerMachineDetails.clusterName
+          - op: replace
+            path: /spec/template/spec/subnet
+            valueFrom:
+              template: |
+                - type: name
+                  name: {{ .workerMachineDetails.subnetName }}
+  variables:
+    - name: sshKey
+      required: true
+      schema:
+        openAPIV3Schema:
+          description: Public key to SSH onto the cluster nodes.
+ type: string + - name: controlPlaneEndpoint + required: true + schema: + openAPIV3Schema: + properties: + IP: + type: string + port: + type: integer + type: object + - name: prismCentralEndpoint + required: true + schema: + openAPIV3Schema: + properties: + address: + type: string + port: + type: integer + insecure: + type: boolean + type: object + - name: controlPlaneMachineDetails + required: true + schema: + openAPIV3Schema: + properties: + bootType: + type: string + vcpusPerSocket: + type: integer + vcpuSockets: + type: integer + memorySize: + type: string + systemDiskSize: + type: string + imageName: + type: string + clusterName: + type: string + subnetName: + type: string + type: object + - name: workerMachineDetails + required: true + schema: + openAPIV3Schema: + properties: + bootType: + type: string + vcpusPerSocket: + type: integer + vcpuSockets: + type: integer + memorySize: + type: string + systemDiskSize: + type: string + imageName: + type: string + clusterName: + type: string + subnetName: + type: string + type: object \ No newline at end of file diff --git a/templates/clusterclass/kcpt.yaml b/templates/clusterclass/kcpt.yaml new file mode 100644 index 0000000000..76f5d36e7f --- /dev/null +++ b/templates/clusterclass/kcpt.yaml @@ -0,0 +1,111 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: "${CLUSTER_CLASS_NAME}-kcpt" +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + tls-cipher-suites: 
${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + scheduler: + extraArgs: + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "${CONTROL_PLANE_ENDPOINT_IP}" + - name: port + value: "${CONTROL_PLANE_ENDPOINT_PORT=6443}" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "${KUBEVIP_SVC_ENABLE=false}" + - name: lb_enable + value: "${KUBEVIP_LB_ENABLE=false}" + - name: enableServicesElection + value: "${KUBEVIP_SVC_ELECTION=false}" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + 
cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + - echo "after kubeadm call" > /var/log/postkubeadm.log + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + useExperimentalRetryJoin: true + verbosity: 10 \ No newline at end of file diff --git a/templates/clusterclass/kct.yaml b/templates/clusterclass/kct.yaml new file mode 100644 index 0000000000..74fc727819 --- /dev/null +++ b/templates/clusterclass/kct.yaml @@ -0,0 +1,19 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-kcfg-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: 
${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + postKubeadmCommands: + - echo "after kubeadm call" > /var/log/postkubeadm.log + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + verbosity: 10 diff --git a/templates/clusterclass/kustomization.yaml b/templates/clusterclass/kustomization.yaml new file mode 100644 index 0000000000..9f88a8f1b4 --- /dev/null +++ b/templates/clusterclass/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: "${NAMESPACE}" + +bases: + - ../base-root + - ./nct.yaml + - ./clusterclass.yaml + - ./nmt-cp.yaml + - ./nmt-md.yaml + - ./kcpt.yaml + - ./kct.yaml \ No newline at end of file diff --git a/templates/clusterclass/nct.yaml b/templates/clusterclass/nct.yaml new file mode 100644 index 0000000000..89fe7037d1 --- /dev/null +++ b/templates/clusterclass/nct.yaml @@ -0,0 +1,8 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixClusterTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-nct +spec: + template: + spec: + failureDomains: [] \ No newline at end of file diff --git a/templates/clusterclass/nmt-cp.yaml b/templates/clusterclass/nmt-cp.yaml new file mode 100644 index 0000000000..def651f367 --- /dev/null +++ b/templates/clusterclass/nmt-cp.yaml @@ -0,0 +1,38 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "${CLUSTER_CLASS_NAME}-cp-nmt" +spec: + template: + spec: + providerID: "nutanix://${CLUSTER_NAME}-m1" + # Supported options for boot type: legacy and uefi + # Defaults to legacy if not set + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} + 
vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}" + systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}" + image: + type: name + name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}" + cluster: + type: name + name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}" + subnet: + - type: name + name: "${NUTANIX_SUBNET_NAME}" + # Adds additional categories to the virtual machines. + # Note: Categories must already be present in Prism Central + # additionalCategories: + # - key: AppType + # value: Kubernetes + # Adds the cluster virtual machines to a project defined in Prism Central. + # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central + # Note: Project must already be present in Prism Central. + # project: + # type: name + # name: "NUTANIX_PROJECT_NAME" + # gpus: + # - type: name + # name: "GPU NAME" \ No newline at end of file diff --git a/templates/clusterclass/nmt-md.yaml b/templates/clusterclass/nmt-md.yaml new file mode 100644 index 0000000000..6b32870568 --- /dev/null +++ b/templates/clusterclass/nmt-md.yaml @@ -0,0 +1,38 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "${CLUSTER_CLASS_NAME}-md-nmt" +spec: + template: + spec: + providerID: "nutanix://${CLUSTER_NAME}-m1" + # Supported options for boot type: legacy and uefi + # Defaults to legacy if not set + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}" + systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}" + image: + type: name + name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}" + cluster: + type: name + name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}" + subnet: + - type: name + name: "${NUTANIX_SUBNET_NAME}" + # Adds additional categories to the virtual machines. 
+ # Note: Categories must already be present in Prism Central + # additionalCategories: + # - key: AppType + # value: Kubernetes + # Adds the cluster virtual machines to a project defined in Prism Central. + # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central + # Note: Project must already be present in Prism Central. + # project: + # type: name + # name: "NUTANIX_PROJECT_NAME" + # gpus: + # - type: name + # name: "GPU NAME" \ No newline at end of file diff --git a/templates/topology/ccm-patch.yaml b/templates/topology/ccm-patch.yaml new file mode 100644 index 0000000000..ef1b91dfa1 --- /dev/null +++ b/templates/topology/ccm-patch.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + ccm: "nutanix" + name: "${CLUSTER_NAME}" \ No newline at end of file diff --git a/templates/topology/cluster-with-topology.yaml b/templates/topology/cluster-with-topology.yaml new file mode 100644 index 0000000000..e31b37f84e --- /dev/null +++ b/templates/topology/cluster-with-topology.yaml @@ -0,0 +1,57 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + services: + cidrBlocks: ["172.19.0.0/16"] + pods: + cidrBlocks: ["172.20.0.0/16"] + serviceDomain: "cluster.local" + topology: + class: "${CLUSTER_CLASS_NAME}" + version: ${KUBERNETES_VERSION} + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + metadata: {} + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} + metadata: {} + variables: + - name: sshKey + value: '${NUTANIX_SSH_AUTHORIZED_KEY}' + - name: controlPlaneEndpoint + value: + IP: "${CONTROL_PLANE_ENDPOINT_IP}" + port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} + - name: prismCentralEndpoint + value: + address: "${NUTANIX_ENDPOINT}" + port: ${NUTANIX_PORT=9440} + insecure: ${NUTANIX_INSECURE=false} + - name: 
controlPlaneMachineDetails + value: + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}" + systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}" + imageName: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}" + clusterName: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}" + subnetName: "${NUTANIX_SUBNET_NAME}" + - name: workerMachineDetails + value: + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}" + systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}" + imageName: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}" + clusterName: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}" + subnetName: "${NUTANIX_SUBNET_NAME}" \ No newline at end of file diff --git a/templates/topology/kustomization.yaml b/templates/topology/kustomization.yaml new file mode 100644 index 0000000000..cf233ad32a --- /dev/null +++ b/templates/topology/kustomization.yaml @@ -0,0 +1,10 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: "${NAMESPACE}" + +bases: + - ./cluster-with-topology.yaml + +patchesStrategicMerge: +- ./ccm-patch.yaml \ No newline at end of file diff --git a/test-cluster-with-topology.mk b/test-cluster-with-topology.mk new file mode 100644 index 0000000000..6843b4d3ef --- /dev/null +++ b/test-cluster-with-topology.mk @@ -0,0 +1,43 @@ +TEST_NAMESPACE=ns-topology +TEST_CLUSTER_CLASS_NAME=my-clusterclass +TEST_TOPOLOGY_CLUSTER_NAME=mycluster-with-topology + +.PHONY: test-cc-cluster-create +test-cc-cluster-create: cluster-templates ## Create a workload cluster with topology (which uses clusterclass) + clusterctl generate cluster ${TEST_CLUSTER_CLASS_NAME} --from ./templates/cluster-template-clusterclass.yaml -n $(TEST_NAMESPACE) > ${TEST_CLUSTER_CLASS_NAME}.yaml + clusterctl generate cluster 
${TEST_TOPOLOGY_CLUSTER_NAME} --from ./templates/cluster-template-topology.yaml -n $(TEST_NAMESPACE) > ${TEST_TOPOLOGY_CLUSTER_NAME}.yaml + kubectl create ns $(TEST_NAMESPACE) --dry-run=client -oyaml | kubectl apply --server-side -f - + kubectl apply --server-side -f ./${TEST_CLUSTER_CLASS_NAME}.yaml + kubectl apply --server-side -f ./${TEST_TOPOLOGY_CLUSTER_NAME}.yaml + +.PHONY: test-cc-cluster-upgrade +test-cc-cluster-upgrade: ## Upgrade K8S version of cluster with topology (which uses clusterclass) + clusterctl generate cluster ${TEST_TOPOLOGY_CLUSTER_NAME} --from ./templates/cluster-template-topology.yaml -n $(TEST_NAMESPACE) --kubernetes-version=${UPGRADE_K8S_VERSION_TO} > ${TEST_TOPOLOGY_CLUSTER_NAME}.yaml + kubectl apply --server-side -f ./${TEST_TOPOLOGY_CLUSTER_NAME}.yaml + +.PHONY: test-cc-cluster-delete +test-cc-cluster-delete: ## Delete workload cluster with topology + kubectl -n $(TEST_NAMESPACE) delete cluster ${TEST_TOPOLOGY_CLUSTER_NAME} --ignore-not-found + kubectl -n $(TEST_NAMESPACE) delete secret ${TEST_TOPOLOGY_CLUSTER_NAME} --ignore-not-found + kubectl -n $(TEST_NAMESPACE) delete cm ${TEST_TOPOLOGY_CLUSTER_NAME}-pc-trusted-ca-bundle --ignore-not-found + rm -f ${TEST_TOPOLOGY_CLUSTER_NAME}.yaml + rm -f ${TEST_CLUSTER_CLASS_NAME}.yaml + +.PHONY: generate-cc-cluster-kubeconfig +generate-cc-cluster-kubeconfig: ## generate kubeconfig of workload cluster with topology + kubectl -n ${TEST_NAMESPACE} get secret ${TEST_TOPOLOGY_CLUSTER_NAME}-kubeconfig -o json | jq -r .data.value | base64 --decode > ${TEST_TOPOLOGY_CLUSTER_NAME}.workload.kubeconfig + +.PHONY: test-cc-cluster-install-cni +test-cc-cluster-install-cni: generate-cc-cluster-kubeconfig ## install cni on workload cluster with topology + kubectl --kubeconfig ./${TEST_TOPOLOGY_CLUSTER_NAME}.workload.kubeconfig apply -f https://raw.githubusercontent.com/nutanix-cloud-native/cluster-api-provider-nutanix/main/test/e2e/data/cni/calico/calico.yaml + +.PHONY: list-cc-cluster-resources 
+list-cc-cluster-resources: generate-cc-cluster-kubeconfig ## list resources of workload cluster with topology + kubectl -n capx-system get endpoints + kubectl get crd | grep nutanix + kubectl get cluster-api -A + clusterctl describe cluster ${TEST_TOPOLOGY_CLUSTER_NAME} -n ${TEST_NAMESPACE} + kubectl -n $(TEST_NAMESPACE) get Cluster,NutanixCluster,Machine,NutanixMachine,KubeAdmControlPlane,machinedeployments,MachineHealthCheck,nodes + kubectl get ValidatingWebhookConfiguration,MutatingWebhookConfiguration -A + kubectl --kubeconfig ./${TEST_TOPOLOGY_CLUSTER_NAME}.workload.kubeconfig get nodes,ns + kubectl --kubeconfig ./${TEST_TOPOLOGY_CLUSTER_NAME}.workload.kubeconfig get pods -A \ No newline at end of file diff --git a/test-cluster-without-topology.mk b/test-cluster-without-topology.mk new file mode 100644 index 0000000000..402f5e0e4e --- /dev/null +++ b/test-cluster-without-topology.mk @@ -0,0 +1,36 @@ +TEST_NAMESPACE=ns-without-topology +TEST_CLUSTER_NAME=mycluster-without-topology + +.PHONY: test-cluster-create +test-cluster-create: $(CLUSTERCTL) ## Create a workload cluster without topology + $(CLUSTERCTL) version + $(CLUSTERCTL) config repositories | grep nutanix + $(CLUSTERCTL) generate cluster ${TEST_CLUSTER_NAME} -i nutanix:${LOCAL_PROVIDER_VERSION} --list-variables -v 10 + $(CLUSTERCTL) generate cluster ${TEST_CLUSTER_NAME} -i nutanix:${LOCAL_PROVIDER_VERSION} --target-namespace ${TEST_NAMESPACE} -v 10 > ./${TEST_CLUSTER_NAME}.yaml + kubectl create ns $(TEST_NAMESPACE) --dry-run=client -oyaml | kubectl apply --server-side -f - + kubectl apply --server-side -f ./${TEST_CLUSTER_NAME}.yaml + +.PHONY: test-cluster-delete +test-cluster-delete: ## Delete workload cluster without topology + kubectl -n ${TEST_NAMESPACE} delete cluster ${TEST_CLUSTER_NAME} --ignore-not-found + +.PHONY: generate-cluster-kubeconfig +generate-cluster-kubeconfig: ## generate kubeconfig of workload cluster without topology + kubectl -n $(TEST_NAMESPACE) get secret + kubectl -n 
${TEST_NAMESPACE} get secret ${TEST_CLUSTER_NAME}-kubeconfig -o json | jq -r .data.value | base64 --decode > ${TEST_CLUSTER_NAME}.workload.kubeconfig
+
+.PHONY: test-cluster-install-cni
+test-cluster-install-cni: generate-cluster-kubeconfig ## install cni on workload cluster without topology
+	kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.workload.kubeconfig apply -f https://raw.githubusercontent.com/nutanix-cloud-native/cluster-api-provider-nutanix/main/test/e2e/data/cni/calico/calico.yaml
+
+.PHONY: list-bootstrap-resources
+list-bootstrap-resources: ## List resources of bootstrap/management cluster
+	kubectl get ns
+	kubectl get all --all-namespaces
+	kubectl -n capx-system get all
+	kubectl -n $(TEST_NAMESPACE) get Cluster,NutanixCluster,Machine,NutanixMachine,KubeAdmControlPlane,MachineHealthCheck,nodes
+	kubectl -n capx-system get pod
+
+.PHONY: list-workload-resources
+list-workload-resources: ## List resources of workload cluster
+	kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.workload.kubeconfig get nodes,ns
\ No newline at end of file
diff --git a/test/e2e/clusterclass_changes_test.go b/test/e2e/clusterclass_changes_test.go
new file mode 100644
index 0000000000..b46f7fdddd
--- /dev/null
+++ b/test/e2e/clusterclass_changes_test.go
@@ -0,0 +1,54 @@
+//go:build e2e
+// +build e2e
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	. 
"github.com/onsi/ginkgo/v2" + + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" +) + +var _ = Describe("When testing ClusterClass changes [ClusterClass]", Label("clusterclass", "slow", "network"), func() { + capi_e2e.ClusterClassChangesSpec(ctx, func() capi_e2e.ClusterClassChangesSpecInput { + return capi_e2e.ClusterClassChangesSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: "topology", + // ModifyControlPlaneFields are the ControlPlane fields which will be set on the + // ControlPlaneTemplate of the ClusterClass after the initial Cluster creation. + // The test verifies that these fields are rolled out to the ControlPlane. + ModifyControlPlaneFields: map[string]interface{}{ + "spec.machineTemplate.nodeDrainTimeout": "10s", + }, + // ModifyMachineDeploymentBootstrapConfigTemplateFields are the fields which will be set on the + // BootstrapConfigTemplate of all MachineDeploymentClasses of the ClusterClass after the initial Cluster creation. + // The test verifies that these fields are rolled out to the MachineDeployments. 
+ ModifyMachineDeploymentBootstrapConfigTemplateFields: map[string]interface{}{ + "spec.template.spec.verbosity": int64(4), + }, + ModifyMachineDeploymentInfrastructureMachineTemplateFields: map[string]interface{}{ + "spec.template.spec.vcpuSockets": int64(1), + }, + } + }) +}) diff --git a/test/e2e/config/nutanix.yaml b/test/e2e/config/nutanix.yaml index 47f75a73d5..823a60cd1f 100644 --- a/test/e2e/config/nutanix.yaml +++ b/test/e2e/config/nutanix.yaml @@ -268,6 +268,9 @@ providers: - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-csi.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains.yaml" - sourcePath: "../data/infrastructure-nutanix/ccm-update.yaml" + - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-clusterclass.yaml" + - sourcePath: "../data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml" + - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-topology.yaml" variables: # Default variables for the e2e test; those values could be overridden via env variables, thus @@ -277,6 +280,8 @@ variables: KUBERNETES_VERSION_MANAGEMENT: "v1.23.6" IP_FAMILY: "IPv4" CLUSTERCTL_LOG_LEVEL: 10 + CLUSTER_TOPOLOGY: "true" + CLUSTER_CLASS_NAME: "e2e" NUTANIX_PROVIDER: "true" NUTANIX_ENDPOINT: "" NUTANIX_USER: "" diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cluster-with-kcp.yaml index b87787daee..4b9fe7f647 100644 --- a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cluster-with-kcp.yaml @@ -12,7 +12,7 @@ spec: name: "${CLUSTER_NAME}" kind: Secret additionalTrustBundle: - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle kind: ConfigMap controlPlaneEndpoint: host: "${CONTROL_PLANE_ENDPOINT_IP_V124}" diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cm.yaml 
b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cm.yaml index ff4d33af76..5bf1d2f7ef 100644 --- a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cm.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cm.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle namespace: "${NAMESPACE}" binaryData: ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} diff --git a/test/e2e/data/infrastructure-nutanix/v1alpha4/bases/cm.yaml b/test/e2e/data/infrastructure-nutanix/v1alpha4/bases/cm.yaml index a968f21831..a7332f1690 100644 --- a/test/e2e/data/infrastructure-nutanix/v1alpha4/bases/cm.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1alpha4/bases/cm.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: user-ca-bundle + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle namespace: "${NAMESPACE}" data: ca.crt: "${NUTANIX_ADDITIONAL_TRUST_BUNDLE=''}" diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/base/cni-patch.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/base/cni-patch.yaml index 9936ca0cf9..b0da743a10 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/base/cni-patch.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/base/cni-patch.yaml @@ -3,5 +3,4 @@ kind: Cluster metadata: labels: cni: ${CLUSTER_NAME}-crs-cni - name: "${CLUSTER_NAME}" - namespace: "${NAMESPACE}" + name: "${CLUSTER_NAME}" \ No newline at end of file diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-clusterclass/kustomization.yaml new file mode 100644 index 0000000000..18e9ecc695 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-clusterclass/kustomization.yaml @@ -0,0 +1,2 @@ +bases: + - ../../../../../../templates/clusterclass/ diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/failure-domain-patch.yaml 
b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/failure-domain-patch.yaml index 3887db614e..989dc1d3b8 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/failure-domain-patch.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/failure-domain-patch.yaml @@ -3,7 +3,6 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: NutanixCluster metadata: name: ${CLUSTER_NAME} - namespace: ${NAMESPACE} spec: failureDomains: - name: ${NUTANIX_FAILURE_DOMAIN_1_NAME} @@ -35,7 +34,6 @@ apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlane metadata: name: ${CLUSTER_NAME}-kcp - namespace: ${NAMESPACE} spec: replicas: 3 --- @@ -45,7 +43,6 @@ metadata: labels: cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} name: ${CLUSTER_NAME}-wmd - namespace: ${NAMESPACE} spec: replicas: 0 template: diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/kustomization.yaml index 459dd0533d..90dd08d63f 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/kustomization.yaml @@ -1,22 +1,24 @@ bases: - - ../../../../../../templates/base/cluster-with-kcp.yaml - - ../../../../../../templates/base/secret.yaml - - ../../../../../../templates/base/cm.yaml + - ../../../../../../templates/base/nutanix-cluster.yaml + - ../../../../../../templates/base/cluster-without-topology.yaml + - ../../../../../../templates/base/kcp.yaml + - ../../../../../../templates/base/kct.yaml + - ../../../../../../templates/base-root/secret.yaml + - ../../../../../../templates/base-root/cm.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml - - 
../../../../../../templates/base/nutanix-ccm-crs.yaml - - ../../../../../../templates/base/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/nutanix-ccm-crs.yaml + - ../../../../../../templates/base-root/nutanix-ccm-secret.yaml - ../base/crs.yaml - failure-domain-nmt.yaml configMapGenerator: -- name: nutanix-ccm - behavior: merge - files: - - ../../../../../../templates/base/nutanix-ccm.yaml + - name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base-root/nutanix-ccm.yaml patchesStrategicMerge: - ../base/cni-patch.yaml - failure-domain-patch.yaml - ../../../../../../templates/base/ccm-patch.yaml - diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation/kustomization.yaml index 689951fdca..c5ca9c48fd 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation/kustomization.yaml @@ -1,19 +1,22 @@ bases: - - ../../../../../../templates/base/cluster-with-kcp.yaml - - ../../../../../../templates/base/secret.yaml - - ../../../../../../templates/base/cm.yaml + - ../../../../../../templates/base/nutanix-cluster.yaml + - ../../../../../../templates/base/cluster-without-topology.yaml + - ../../../../../../templates/base/kcp.yaml + - ../../../../../../templates/base/kct.yaml + - ../../../../../../templates/base-root/secret.yaml + - ../../../../../../templates/base-root/cm.yaml - ../../../../../../templates/base/nmt.yaml - ../../../../../../templates/base/md.yaml - - ../../../../../../templates/base/nutanix-ccm-crs.yaml - - ../../../../../../templates/base/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/nutanix-ccm-crs.yaml + - ../../../../../../templates/base-root/nutanix-ccm-secret.yaml - ../base/crs.yaml - ./mhc.yaml configMapGenerator: -- name: 
nutanix-ccm - behavior: merge - files: - - ../../../../../../templates/base/nutanix-ccm.yaml + - name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base-root/nutanix-ccm.yaml patchesStrategicMerge: - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-scale-in/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-scale-in/cluster-with-kcp.yaml index 0de5654cca..7255dd80f7 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-scale-in/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-scale-in/cluster-with-kcp.yaml @@ -3,7 +3,6 @@ apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlane metadata: name: "${CLUSTER_NAME}-kcp" - namespace: "${NAMESPACE}" spec: rolloutStrategy: rollingUpdate: diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/kustomization.yaml index ef15312586..e860f5a55e 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/kustomization.yaml @@ -1,19 +1,22 @@ bases: - - ../../../../../../templates/base/cluster-with-kcp.yaml - - ../../../../../../templates/base/secret.yaml - - ../../../../../../templates/base/cm.yaml + - ../../../../../../templates/base/nutanix-cluster.yaml + - ../../../../../../templates/base/cluster-without-topology.yaml + - ../../../../../../templates/base/kcp.yaml + - ../../../../../../templates/base/kct.yaml + - ../../../../../../templates/base-root/secret.yaml + - ../../../../../../templates/base-root/cm.yaml - ../../../../../../templates/base/nmt.yaml - ../../../../../../templates/base/md.yaml - - ../../../../../../templates/base/nutanix-ccm-crs.yaml - - 
../../../../../../templates/base/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/nutanix-ccm-crs.yaml + - ../../../../../../templates/base-root/nutanix-ccm-secret.yaml - ../base/crs.yaml - ./mhc.yaml configMapGenerator: -- name: nutanix-ccm - behavior: merge - files: - - ../../../../../../templates/base/nutanix-ccm.yaml + - name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base-root/nutanix-ccm.yaml patchesStrategicMerge: - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/md.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/md.yaml index 510266c5bf..583e04e2c5 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/md.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/md.yaml @@ -2,7 +2,6 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: name: "${CLUSTER_NAME}-wmd" - namespace: "${NAMESPACE}" spec: template: metadata: diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt/kustomization.yaml index 7e15027194..0af1938185 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt/kustomization.yaml @@ -1,18 +1,21 @@ bases: - - ../../../../../../templates/base/cluster-with-kcp.yaml - - ../../../../../../templates/base/secret.yaml - - ../../../../../../templates/base/cm.yaml + - ../../../../../../templates/base/nutanix-cluster.yaml + - ../../../../../../templates/base/cluster-without-topology.yaml + - ../../../../../../templates/base/kcp.yaml + - ../../../../../../templates/base/kct.yaml + - ../../../../../../templates/base-root/secret.yaml + - ../../../../../../templates/base-root/cm.yaml - 
../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml - - ../../../../../../templates/base/nutanix-ccm-crs.yaml - - ../../../../../../templates/base/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/nutanix-ccm-crs.yaml + - ../../../../../../templates/base-root/nutanix-ccm-secret.yaml - ../base/crs.yaml configMapGenerator: -- name: nutanix-ccm - behavior: merge - files: - - ../../../../../../templates/base/nutanix-ccm.yaml + - name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base-root/nutanix-ccm.yaml patchesStrategicMerge: - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/kustomization.yaml index ce1814e967..398e1b0d1e 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/kustomization.yaml @@ -1,18 +1,21 @@ bases: - - ../../../../../../templates/base/cluster-with-kcp.yaml + - ../../../../../../templates/base/nutanix-cluster.yaml + - ../../../../../../templates/base/cluster-without-topology.yaml + - ../../../../../../templates/base/kcp.yaml + - ../../../../../../templates/base/kct.yaml - ../../../../../../templates/base/nmt.yaml - - ../../../../../../templates/base/cm.yaml + - ../../../../../../templates/base-root/cm.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml - - ../../../../../../templates/base/nutanix-ccm-crs.yaml - - ../../../../../../templates/base/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/nutanix-ccm-crs.yaml + - ../../../../../../templates/base-root/nutanix-ccm-secret.yaml - ../base/crs.yaml configMapGenerator: -- name: nutanix-ccm - behavior: merge - files: - - 
../../../../../../templates/base/nutanix-ccm.yaml + - name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base-root/nutanix-ccm.yaml patchesStrategicMerge: - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/nc.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/nc.yaml index 52474f21d8..d89c56feb0 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/nc.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/nc.yaml @@ -2,5 +2,4 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: NutanixCluster metadata: name: "${CLUSTER_NAME}" - namespace: "${NAMESPACE}" $patch: delete diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-secret/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-secret/kustomization.yaml index 34b8b0ccf7..d920b084ca 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-secret/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-secret/kustomization.yaml @@ -1,18 +1,21 @@ bases: - - ../../../../../../templates/base/cluster-with-kcp.yaml - - ../../../../../../templates/base/cm.yaml + - ../../../../../../templates/base/nutanix-cluster.yaml + - ../../../../../../templates/base/cluster-without-topology.yaml + - ../../../../../../templates/base/kct.yaml + - ../../../../../../templates/base/kcp.yaml - ../../../../../../templates/base/nmt.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml - - ../../../../../../templates/base/nutanix-ccm-crs.yaml - - ../../../../../../templates/base/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/nutanix-ccm-crs.yaml + - ../../../../../../templates/base-root/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/cm.yaml - 
../base/crs.yaml configMapGenerator: - name: nutanix-ccm behavior: merge files: - - ../../../../../../templates/base/nutanix-ccm.yaml + - ../../../../../../templates/base-root/nutanix-ccm.yaml patchesStrategicMerge: - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/kustomization.yaml index 90391ea7c6..16c9404d47 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/kustomization.yaml @@ -1,19 +1,22 @@ bases: - - ../../../../../../templates/base/cluster-with-kcp.yaml - - ../../../../../../templates/base/secret.yaml - - ../../../../../../templates/base/cm.yaml + - ../../../../../../templates/base/nutanix-cluster.yaml + - ../../../../../../templates/base/cluster-without-topology.yaml + - ../../../../../../templates/base/kcp.yaml + - ../../../../../../templates/base/kct.yaml + - ../../../../../../templates/base-root/secret.yaml + - ../../../../../../templates/base-root/cm.yaml - ../../../../../../templates/base/nmt.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml - - ../../../../../../templates/base/nutanix-ccm-crs.yaml - - ../../../../../../templates/base/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/nutanix-ccm-crs.yaml + - ../../../../../../templates/base-root/nutanix-ccm-secret.yaml - ../base/crs.yaml configMapGenerator: -- name: nutanix-ccm - behavior: merge - files: - - ../../../../../../templates/base/nutanix-ccm.yaml + - name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base-root/nutanix-ccm.yaml patchesStrategicMerge: - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/nmt.yaml 
b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/nmt.yaml index f7a38a2cf2..559725b30d 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/nmt.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/nmt.yaml @@ -3,7 +3,6 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: NutanixMachineTemplate metadata: name: "${CLUSTER_NAME}-mt-0" - namespace: "${NAMESPACE}" spec: template: spec: diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-topology/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-topology/kustomization.yaml new file mode 100644 index 0000000000..4e12e96928 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-topology/kustomization.yaml @@ -0,0 +1,6 @@ +bases: + - ../../../../../../templates/topology/ + - ../base/crs.yaml + +patchesStrategicMerge: + - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-upgrades/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-upgrades/kustomization.yaml index 0a4f1b8f08..cb02c7b719 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-upgrades/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-upgrades/kustomization.yaml @@ -1,19 +1,22 @@ bases: - - ../../../../../../templates/base/cluster-with-kcp.yaml - - ../../../../../../templates/base/secret.yaml - - ../../../../../../templates/base/cm.yaml + - ../../../../../../templates/base/nutanix-cluster.yaml + - ../../../../../../templates/base/cluster-without-topology.yaml + - ../../../../../../templates/base/kcp.yaml + - ../../../../../../templates/base/kct.yaml + - ../../../../../../templates/base-root/secret.yaml + - ../../../../../../templates/base-root/cm.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml - - 
../../../../../../templates/base/nutanix-ccm-crs.yaml - - ../../../../../../templates/base/nutanix-ccm-secret.yaml + - ../../../../../../templates/base-root/nutanix-ccm-crs.yaml + - ../../../../../../templates/base-root/nutanix-ccm-secret.yaml - ../base/crs.yaml - ./nmt.yaml configMapGenerator: -- name: nutanix-ccm - behavior: merge - files: - - ../../../../../../templates/base/nutanix-ccm.yaml + - name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base-root/nutanix-ccm.yaml patchesStrategicMerge: - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml new file mode 100644 index 0000000000..8241698d8e --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/clusterclass-e2e.yaml @@ -0,0 +1,765 @@ +apiVersion: v1 +binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} +kind: ConfigMap +metadata: + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + namespace: ${NAMESPACE} +--- +apiVersion: v1 +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + namespace: kube-system + binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cm.yaml + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "${NUTANIX_ENDPOINT}", + "port": ${NUTANIX_PORT=9440}, + "insecure": ${NUTANIX_INSECURE=false}, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + }, + "additionalTrustBundle": { + "kind": "ConfigMap", + "name": "${CLUSTER_NAME}-pc-trusted-ca-bundle", + "namespace": "kube-system" + } + 
}, + "enableCustomLabeling": ${CCM_CUSTOM_LABEL=false}, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cloud-provider-nutanix-deployment.yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + 
nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.1}" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +kind: ConfigMap +metadata: + name: nutanix-ccm + namespace: ${NAMESPACE} +--- +apiVersion: v1 +kind: Secret +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +stringData: + credentials: "[\n {\n \"type\": \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n + \ \"username\": \"${NUTANIX_USER}\", \n \"password\": \"${NUTANIX_PASSWORD}\"\n + \ }\n }\n }\n]\n" +--- +apiVersion: v1 +kind: Secret +metadata: + name: nutanix-ccm-secret + namespace: ${NAMESPACE} +stringData: + nutanix-ccm-secret.yaml: "apiVersion: 
v1\nkind: Secret\nmetadata:\n name: nutanix-creds\n + \ namespace: kube-system\nstringData:\n credentials: |\n [\n {\n \"type\": + \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n \"username\": + \"${NUTANIX_USER}\",\n \"password\": \"${NUTANIX_PASSWORD}\"\n },\n + \ \"prismElements\": null\n }\n }\n ]\n" +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: nutanix-ccm-crs + namespace: ${NAMESPACE} +spec: + clusterSelector: + matchLabels: + ccm: nutanix + resources: + - kind: ConfigMap + name: nutanix-ccm + - kind: Secret + name: nutanix-ccm-secret + - kind: ConfigMap + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + strategy: ApplyOnce +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-kcfg-0 + namespace: ${NAMESPACE} +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + postKubeadmCommands: + - echo "after kubeadm call" > /var/log/postkubeadm.log + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + verbosity: 10 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} + namespace: ${NAMESPACE} +spec: + controlPlane: + machineHealthCheck: + maxUnhealthy: 40% + nodeStartupTimeout: 10m + unhealthyConditions: + - status: "False" + timeout: 300s + type: Ready + - status: Unknown + timeout: 300s + type: Ready + - 
status: "True" + timeout: 300s + type: MemoryPressure + - status: "True" + timeout: 300s + type: DiskPressure + - status: "True" + timeout: 300s + type: PIDPressure + - status: "True" + timeout: 300s + type: NetworkUnavailable + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: ${CLUSTER_CLASS_NAME}-cp-nmt + namespace: ${NAMESPACE} + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: ${CLUSTER_CLASS_NAME}-kcpt + namespace: ${NAMESPACE} + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + name: ${CLUSTER_CLASS_NAME}-nct + namespace: ${NAMESPACE} + patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/users + valueFrom: + template: | + - name: capxuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - '{{ .sshKey }}' + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/users + valueFrom: + template: | + - name: capxuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - '{{ .sshKey }}' + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: add-ssh-user + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/controlPlaneEndpoint + valueFrom: + template: | + host: '{{ .controlPlaneEndpoint.IP }}' + port: {{ .controlPlaneEndpoint.port }} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + matchResources: + infrastructureCluster: true + name: update-control-plane-endpoint + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/prismCentral + 
valueFrom: + template: | + address: '{{ .prismCentralEndpoint.address }}' + port: {{ .prismCentralEndpoint.port }} + insecure: {{ .prismCentralEndpoint.insecure }} + credentialRef: + name: "${CLUSTER_NAME}" + kind: Secret + additionalTrustBundle: + name: ${CLUSTER_NAME}-pc-trusted-ca-bundle + kind: ConfigMap + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixClusterTemplate + matchResources: + infrastructureCluster: true + name: add-pc-endpoint-and-creds + - definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/bootType + valueFrom: + variable: controlPlaneMachineDetails.bootType + - op: replace + path: /spec/template/spec/vcpusPerSocket + valueFrom: + variable: controlPlaneMachineDetails.vcpusPerSocket + - op: replace + path: /spec/template/spec/memorySize + valueFrom: + variable: controlPlaneMachineDetails.memorySize + - op: replace + path: /spec/template/spec/systemDiskSize + valueFrom: + variable: controlPlaneMachineDetails.systemDiskSize + - op: replace + path: /spec/template/spec/image/name + valueFrom: + variable: controlPlaneMachineDetails.imageName + - op: replace + path: /spec/template/spec/cluster/name + valueFrom: + variable: controlPlaneMachineDetails.clusterName + - op: replace + path: /spec/template/spec/subnet + valueFrom: + template: | + - type: name + name: {{ .controlPlaneMachineDetails.subnetName }} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + matchResources: + controlPlane: true + name: update-control-plane-machine-template + - definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/bootType + valueFrom: + variable: workerMachineDetails.bootType + - op: replace + path: /spec/template/spec/vcpusPerSocket + valueFrom: + variable: workerMachineDetails.vcpusPerSocket + - op: replace + path: /spec/template/spec/memorySize + valueFrom: + variable: workerMachineDetails.memorySize + - op: replace + path: 
/spec/template/spec/systemDiskSize + valueFrom: + variable: workerMachineDetails.systemDiskSize + - op: replace + path: /spec/template/spec/image/name + valueFrom: + variable: workerMachineDetails.imageName + - op: replace + path: /spec/template/spec/cluster/name + valueFrom: + variable: workerMachineDetails.clusterName + - op: replace + path: /spec/template/spec/subnet + valueFrom: + template: | + - type: name + name: {{ .workerMachineDetails.subnetName }} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: update-worker-machine-template + variables: + - name: sshKey + required: true + schema: + openAPIV3Schema: + description: Public key to SSH onto the cluster nodes. + type: string + - name: controlPlaneEndpoint + required: true + schema: + openAPIV3Schema: + properties: + IP: + type: string + port: + type: integer + type: object + - name: prismCentralEndpoint + required: true + schema: + openAPIV3Schema: + properties: + address: + type: string + insecure: + type: boolean + port: + type: integer + type: object + - name: controlPlaneMachineDetails + required: true + schema: + openAPIV3Schema: + properties: + bootType: + type: string + clusterName: + type: string + imageName: + type: string + memorySize: + type: string + subnetName: + type: string + systemDiskSize: + type: string + vcpuSockets: + type: integer + vcpusPerSocket: + type: integer + type: object + - name: workerMachineDetails + required: true + schema: + openAPIV3Schema: + properties: + bootType: + type: string + clusterName: + type: string + imageName: + type: string + memorySize: + type: string + subnetName: + type: string + systemDiskSize: + type: string + vcpuSockets: + type: integer + vcpusPerSocket: + type: integer + type: object + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + machineHealthCheck: + maxUnhealthy: 40% + 
nodeStartupTimeout: 10m + unhealthyConditions: + - status: "False" + timeout: 300s + type: Ready + - status: Unknown + timeout: 300s + type: Ready + - status: "True" + timeout: 300s + type: MemoryPressure + - status: "True" + timeout: 300s + type: DiskPressure + - status: "True" + timeout: 300s + type: PIDPressure + - status: "True" + timeout: 300s + type: NetworkUnavailable + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-kcfg-0 + namespace: ${NAMESPACE} + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: ${CLUSTER_CLASS_NAME}-md-nmt + namespace: ${NAMESPACE} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-kcpt + namespace: ${NAMESPACE} +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + scheduler: + extraArgs: + tls-cipher-suites: 
${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "${CONTROL_PLANE_ENDPOINT_IP}" + - name: port + value: "${CONTROL_PLANE_ENDPOINT_PORT=6443}" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "${KUBEVIP_SVC_ENABLE=false}" + - name: lb_enable + value: "${KUBEVIP_LB_ENABLE=false}" + - name: enableServicesElection + value: "${KUBEVIP_SVC_ELECTION=false}" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: 
${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + - echo "after kubeadm call" > /var/log/postkubeadm.log + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + useExperimentalRetryJoin: true + verbosity: 10 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixClusterTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-nct + namespace: ${NAMESPACE} +spec: + template: + spec: + failureDomains: [] +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-cp-nmt + namespace: ${NAMESPACE} +spec: + template: + spec: + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + cluster: + name: ${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME} + type: name + image: + name: ${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME} + type: name + memorySize: ${NUTANIX_MACHINE_MEMORY_SIZE=4Gi} + providerID: nutanix://${CLUSTER_NAME}-m1 + subnet: + - name: ${NUTANIX_SUBNET_NAME} + type: name + systemDiskSize: ${NUTANIX_SYSTEMDISK_SIZE=40Gi} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} +--- 
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-md-nmt + namespace: ${NAMESPACE} +spec: + template: + spec: + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + cluster: + name: ${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME} + type: name + image: + name: ${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME} + type: name + memorySize: ${NUTANIX_MACHINE_MEMORY_SIZE=4Gi} + providerID: nutanix://${CLUSTER_NAME}-m1 + subnet: + - name: ${NUTANIX_SUBNET_NAME} + type: name + systemDiskSize: ${NUTANIX_SYSTEMDISK_SIZE=40Gi} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-clusterclass/kustomization.yaml new file mode 100644 index 0000000000..351d69ce04 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-clusterclass/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-clusterclass/ + +patchesStrategicMerge: + - ../no-kubeproxy-clusterclass.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-failure-domains/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-failure-domains/kustomization.yaml new file mode 100644 index 0000000000..ed09d2bcda --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-failure-domains/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-failure-domains/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy-clusterclass.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy-clusterclass.yaml new file mode 100644 index 0000000000..4c49b849b7 --- /dev/null 
+++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy-clusterclass.yaml @@ -0,0 +1,22 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: "${CLUSTER_CLASS_NAME}-kcpt" +spec: + template: + spec: + kubeadmConfigSpec: + initConfiguration: + skipPhases: + - addon/kube-proxy +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-kcfg-0" +spec: + template: + spec: + initConfiguration: + skipPhases: + - addon/kube-proxy diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy.yaml index 9e2e035443..c6608b1c55 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy.yaml @@ -3,7 +3,6 @@ apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlane metadata: name: "${CLUSTER_NAME}-kcp" - namespace: "${NAMESPACE}" spec: kubeadmConfigSpec: initConfiguration: @@ -14,7 +13,6 @@ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate metadata: name: "${CLUSTER_NAME}-kcfg-0" - namespace: "${NAMESPACE}" spec: template: spec: