From 1dd78f3a1ba0865cc300c1836e39beaee7668c91 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 16 Jun 2023 16:06:29 +0800 Subject: [PATCH 001/173] feat: unified federated types --- ...beadmiral.io_clustercollectedstatuses.yaml | 70 ++ ...ubeadmiral.io_clusterfederatedobjects.yaml | 228 +++++++ ...admiral.io_clusterpropagationpolicies.yaml | 8 +- ...core.kubeadmiral.io_collectedstatuses.yaml | 70 ++ ...core.kubeadmiral.io_federatedclusters.yaml | 6 +- .../core.kubeadmiral.io_federatedobjects.yaml | 228 +++++++ ...e.kubeadmiral.io_federatedtypeconfigs.yaml | 174 ++--- ...re.kubeadmiral.io_propagationpolicies.yaml | 8 +- hack/generate-groups.sh | 1 - hack/typos.toml | 6 +- .../v1alpha1/extensions_federatedobject.go | 117 ++++ .../extensions_federatedtypeconfig.go | 41 +- pkg/apis/core/v1alpha1/register.go | 6 + .../core/v1alpha1/types_collectedstatus.go | 95 +++ .../core/v1alpha1/types_federatedobject.go | 254 ++++++++ .../v1alpha1/types_federatedtypeconfig.go | 117 ++-- .../core/v1alpha1/types_propagationpolicy.go | 16 +- .../core/v1alpha1/zz_generated.deepcopy.go | 614 +++++++++++++++++- pkg/apis/schedulerwebhook/v1alpha1/types.go | 2 +- pkg/apis/types/register.go | 19 - pkg/apis/types/v1alpha1/doc.go | 20 - .../types/v1alpha1/extensions_follower.go | 45 -- .../types/v1alpha1/extensions_placements.go | 120 ---- pkg/apis/types/v1alpha1/register.go | 61 -- .../v1alpha1/types_federateddeployment.go | 62 -- pkg/apis/types/v1alpha1/types_follower.go | 46 -- pkg/apis/types/v1alpha1/types_overrides.go | 60 -- .../types/v1alpha1/types_overrides_test.go | 38 -- pkg/apis/types/v1alpha1/types_placements.go | 52 -- pkg/apis/types/v1alpha1/types_status.go | 119 ---- .../types/v1alpha1/zz_generated.deepcopy.go | 464 ------------- pkg/client/clientset/versioned/clientset.go | 16 +- .../versioned/fake/clientset_generated.go | 7 - .../clientset/versioned/fake/register.go | 2 - .../clientset/versioned/scheme/register.go | 2 - .../core/v1alpha1/clustercollectedstatus.go | 152 +++++ .../core/v1alpha1/clusterfederatedobject.go | 168 +++++ .../typed/core/v1alpha1/collectedstatus.go | 162 +++++ .../typed/core/v1alpha1/core_client.go | 20 + .../fake/fake_clustercollectedstatus.go | 106 +++ .../fake/fake_clusterfederatedobject.go | 117 ++++ .../v1alpha1/fake/fake_collectedstatus.go | 114 ++++ .../core/v1alpha1/fake/fake_core_client.go | 16 + .../v1alpha1/fake/fake_federatedobject.go | 126 ++++ .../v1alpha1/fake/fake_federatedtypeconfig.go | 11 - .../typed/core/v1alpha1/federatedobject.go | 179 +++++ .../core/v1alpha1/federatedtypeconfig.go | 16 - .../core/v1alpha1/generated_expansion.go | 8 + .../versioned/typed/types/v1alpha1/doc.go | 4 - .../typed/types/v1alpha1/fake/doc.go | 4 - .../types/v1alpha1/fake/fake_types_client.go | 19 - .../types/v1alpha1/generated_expansion.go | 3 - .../typed/types/v1alpha1/types_client.go | 68 -- pkg/client/generic/scheme/register.go | 2 - .../core/v1alpha1/clustercollectedstatus.go | 73 +++ .../core/v1alpha1/clusterfederatedobject.go | 73 +++ .../core/v1alpha1/collectedstatus.go | 74 +++ .../core/v1alpha1/federatedobject.go | 74 +++ .../core/v1alpha1/interface.go | 28 + .../informers/externalversions/generic.go | 8 + .../core/v1alpha1/clustercollectedstatus.go | 52 ++ .../core/v1alpha1/clusterfederatedobject.go | 52 ++ .../listers/core/v1alpha1/collectedstatus.go | 83 +++ .../core/v1alpha1/expansion_generated.go | 24 + .../listers/core/v1alpha1/federatedobject.go | 83 +++ .../extensions/webhook/v1alpha1/adapter.go | 4 +- .../webhook/v1alpha1/adapter_test.go | 4 +- 
pkg/controllers/scheduler/scheduler_test.go | 2 +- pkg/controllers/scheduler/schedulingunit.go | 8 +- pkg/controllers/util/placement.go | 59 -- .../framework/policies/propagationpolicy.go | 4 +- 71 files changed, 3584 insertions(+), 1610 deletions(-) create mode 100644 config/crds/core.kubeadmiral.io_clustercollectedstatuses.yaml create mode 100644 config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml create mode 100644 config/crds/core.kubeadmiral.io_collectedstatuses.yaml create mode 100644 config/crds/core.kubeadmiral.io_federatedobjects.yaml create mode 100644 pkg/apis/core/v1alpha1/extensions_federatedobject.go create mode 100644 pkg/apis/core/v1alpha1/types_collectedstatus.go create mode 100644 pkg/apis/core/v1alpha1/types_federatedobject.go delete mode 100644 pkg/apis/types/register.go delete mode 100644 pkg/apis/types/v1alpha1/doc.go delete mode 100644 pkg/apis/types/v1alpha1/extensions_follower.go delete mode 100644 pkg/apis/types/v1alpha1/extensions_placements.go delete mode 100644 pkg/apis/types/v1alpha1/register.go delete mode 100644 pkg/apis/types/v1alpha1/types_federateddeployment.go delete mode 100644 pkg/apis/types/v1alpha1/types_follower.go delete mode 100644 pkg/apis/types/v1alpha1/types_overrides.go delete mode 100644 pkg/apis/types/v1alpha1/types_overrides_test.go delete mode 100644 pkg/apis/types/v1alpha1/types_placements.go delete mode 100644 pkg/apis/types/v1alpha1/types_status.go delete mode 100644 pkg/apis/types/v1alpha1/zz_generated.deepcopy.go create mode 100644 pkg/client/clientset/versioned/typed/core/v1alpha1/clustercollectedstatus.go create mode 100644 pkg/client/clientset/versioned/typed/core/v1alpha1/clusterfederatedobject.go create mode 100644 pkg/client/clientset/versioned/typed/core/v1alpha1/collectedstatus.go create mode 100644 pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_clustercollectedstatus.go create mode 100644 pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_clusterfederatedobject.go create mode 100644 pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_collectedstatus.go create mode 100644 pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_federatedobject.go create mode 100644 pkg/client/clientset/versioned/typed/core/v1alpha1/federatedobject.go delete mode 100644 pkg/client/clientset/versioned/typed/types/v1alpha1/doc.go delete mode 100644 pkg/client/clientset/versioned/typed/types/v1alpha1/fake/doc.go delete mode 100644 pkg/client/clientset/versioned/typed/types/v1alpha1/fake/fake_types_client.go delete mode 100644 pkg/client/clientset/versioned/typed/types/v1alpha1/generated_expansion.go delete mode 100644 pkg/client/clientset/versioned/typed/types/v1alpha1/types_client.go create mode 100644 pkg/client/informers/externalversions/core/v1alpha1/clustercollectedstatus.go create mode 100644 pkg/client/informers/externalversions/core/v1alpha1/clusterfederatedobject.go create mode 100644 pkg/client/informers/externalversions/core/v1alpha1/collectedstatus.go create mode 100644 pkg/client/informers/externalversions/core/v1alpha1/federatedobject.go create mode 100644 pkg/client/listers/core/v1alpha1/clustercollectedstatus.go create mode 100644 pkg/client/listers/core/v1alpha1/clusterfederatedobject.go create mode 100644 pkg/client/listers/core/v1alpha1/collectedstatus.go create mode 100644 pkg/client/listers/core/v1alpha1/federatedobject.go delete mode 100644 pkg/controllers/util/placement.go diff --git a/config/crds/core.kubeadmiral.io_clustercollectedstatuses.yaml 
b/config/crds/core.kubeadmiral.io_clustercollectedstatuses.yaml new file mode 100644 index 00000000..5159d603 --- /dev/null +++ b/config/crds/core.kubeadmiral.io_clustercollectedstatuses.yaml @@ -0,0 +1,70 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: clustercollectedstatuses.core.kubeadmiral.io +spec: + group: core.kubeadmiral.io + names: + kind: ClusterCollectedStatus + listKind: ClusterCollectedStatusList + plural: clustercollectedstatuses + shortNames: + - ccs + singular: clustercollectedstatus + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterCollectedStatus stores the collected fields of Kubernetes + objects from member clusters, that are propagated by a ClusterFederatedObject. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + clusters: + description: Clusters is the list of member clusters and collected fields + for its propagated Kubernetes object. + items: + description: CollectedFieldsWithCluster stores the collected fields + of a Kubernetes object in a member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + collectedFields: + description: CollectedFields is the set of fields collected + for the Kubernetes object. + x-kubernetes-preserve-unknown-fields: true + error: + description: Error records any errors encountered while collecting + fields from the cluster. + type: string + required: + - cluster + - collectedFields + type: object + type: array + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + lastUpdateTime: + description: LastUpdateTime is the last time that a collection was performed. + format: date-time + type: string + metadata: + type: object + required: + - clusters + - lastUpdateTime + type: object + served: true + storage: true diff --git a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml new file mode 100644 index 00000000..33dade49 --- /dev/null +++ b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml @@ -0,0 +1,228 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: clusterfederatedobjects.core.kubeadmiral.io +spec: + group: core.kubeadmiral.io + names: + kind: ClusterFederatedObject + listKind: ClusterFederatedObjectList + plural: clusterfederatedobjects + shortNames: + - cfo + singular: clusterfederatedobject + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterFederatedObject describes a cluster-scoped Kubernetes + object and how it should be propagated to different member clusters.
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired behavior of the ClusterFederatedObject. + properties: + follows: + description: Follows defines other objects, or "leaders", that the + Kubernetes object should follow during propagation, i.e. the Kubernetes + object should be propagated to all member clusters that its "leaders" + are placed in. + items: + description: LeaderReference contains the identifying metadata of + a "leader" Kubernetes object. + properties: + group: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + type: array + overrides: + description: Overrides describe the overrides that should be applied + to the base template of the Kubernetes object before it is propagated + to individual member clusters. + items: + description: OverrideWithController describes the overrides that + will be applied to a Kubernetes object before it is propagated + to individual member clusters. + properties: + clusters: + description: Override is the list of member clusters and their + respective override patches. + items: + description: ClusterReferenceWithPatches represents a single + member cluster and a list of override patches for the cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + patches: + description: Patches is the list of override patches for + the member cluster. + items: + description: OverridePatch defines a JSON patch. + properties: + op: + type: string + path: + type: string + value: + x-kubernetes-preserve-unknown-fields: true + required: + - path + type: object + type: array + required: + - cluster + type: object + type: array + controller: + description: Controller identifies the controller responsible + for this override. + type: string + required: + - clusters + - controller + type: object + type: array + placements: + description: Placements describe the member clusters that the Kubernetes + object will be propagated to, which is a union of all the listed + clusters. + items: + description: PlacementWithController describes the member clusters + that a Kubernetes object should be propagated to. + properties: + controller: + description: Controller identifies the controller responsible + for this placement. + type: string + placement: + description: Placement is the list of member clusters that the + Kubernetes object should be propagated to. + items: + description: ClusterReference represents a single member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. 
+ type: string + required: + - cluster + type: object + type: array + required: + - controller + - placement + type: object + type: array + template: + description: Template is the base template of the Kubernetes object + to be propagated. + x-kubernetes-preserve-unknown-fields: true + required: + - follows + - overrides + - placements + - template + type: object + status: + description: Status describes the most recently observed status of the + ClusterFederatedObject. + properties: + clusters: + description: Clusters contains the propagation status of the Kubernetes + object for individual member clusters. + items: + description: PropagationStatus describes the propagation of a Kubernetes + object to a given member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + lastObservedGeneration: + description: LastObservedGeneration is the last observed generation + of the Kubernetes object in the member cluster. + format: int64 + type: integer + status: + description: Status describes the current status of propagating + the Kubernetes object to the member cluster. + type: string + required: + - cluster + - status + type: object + type: array + collisionCount: + description: CollisionCount can be used in conjunction with RevisionHistory + to implement rollbacks. + format: int32 + type: integer + conditions: + description: Conditions describe the current state of this FederatedObject. + items: + description: GenericFederatedObjectCondition contains the current + details about a particular condition of a FederatedObject. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the status + of this condition changed. + format: date-time + type: string + lastUpdateTime: + description: LastUpdateTime is the last time a reconciliation + for this condition occurred. + format: date-time + type: string + reason: + description: Reason is the reason for the last status change + of this condition. + type: string + status: + description: Status is the status of the condition, one of True, + False or Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + syncedGeneration: + description: SyncedGeneration is the generation of this FederatedObject + when it was last synced to selected member clusters. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml b/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml index 9c4a19fd..828a179f 100644 --- a/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml +++ b/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml @@ -124,13 +124,13 @@ spec: description: DisableFollowerScheduling is a boolean that determines if follower scheduling is disabled. Resources that depend on other resources (e.g. deployments) are called leaders, and resources that are depended on (e.g. configmaps and secrets) are called followers. If a leader enables follower scheduling, its followers will additionally be scheduled to clusters where the leader is scheduled. type: boolean maxClusters: - description: MaxClusters is the maximum number of replicas that the federated object can be propagated to The maximum number of clusters is unbounded if no value is provided. 
+ description: MaxClusters is the maximum number of replicas that the federated object can be propagated to. The maximum number of clusters is unbounded if no value is provided. format: int64 type: integer placement: - description: Placement is an explicit list of clusters used to select member clusters to propagate resources + description: Placement is an explicit list of clusters used to select member clusters to propagate resources to. items: - description: Placement describes a cluster that a federated object can be propagated to and its propagation preferences. + description: DesiredPlacement describes a cluster that a federated object can be propagated to and its propagation preferences. properties: cluster: description: Cluster is the name of the FederatedCluster to propagate to. @@ -180,7 +180,7 @@ spec: description: StickyCluster determines if a federated object can be rescheduled. type: boolean tolerations: - description: Tolerations describe a set of cluster taints that the policy tolerates + description: Tolerations describe a set of cluster taints that the policy tolerates. items: description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>. properties: diff --git a/config/crds/core.kubeadmiral.io_collectedstatuses.yaml b/config/crds/core.kubeadmiral.io_collectedstatuses.yaml new file mode 100644 index 00000000..815d4ed2 --- /dev/null +++ b/config/crds/core.kubeadmiral.io_collectedstatuses.yaml @@ -0,0 +1,70 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: collectedstatuses.core.kubeadmiral.io +spec: + group: core.kubeadmiral.io + names: + kind: CollectedStatus + listKind: CollectedStatusList + plural: collectedstatuses + shortNames: + - cs + singular: collectedstatus + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: CollectedStatus stores the collected fields of Kubernetes objects + from member clusters, that are propagated by a FederatedObject. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + clusters: + description: Clusters is the list of member clusters and collected fields + for its propagated Kubernetes object. + items: + description: CollectedFieldsWithCluster stores the collected fields + of a Kubernetes object in a member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + collectedFields: + description: CollectedFields is the set of fields collected + for the Kubernetes object. + x-kubernetes-preserve-unknown-fields: true + error: + description: Error records any errors encountered while collecting + fields from the cluster. + type: string + required: + - cluster + - collectedFields + type: object + type: array + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + lastUpdateTime: + description: LastUpdateTime is the last time that a collection was performed. + format: date-time + type: string + metadata: + type: object + required: + - clusters + - lastUpdateTime + type: object + served: true + storage: true diff --git a/config/crds/core.kubeadmiral.io_federatedclusters.yaml b/config/crds/core.kubeadmiral.io_federatedclusters.yaml index bfd0853f..be926b74 100644 --- a/config/crds/core.kubeadmiral.io_federatedclusters.yaml +++ b/config/crds/core.kubeadmiral.io_federatedclusters.yaml @@ -107,15 +107,13 @@ spec: description: The list of api resource types defined in the federated cluster items: - description: APIResource defines how to configure the dynamic client - for an API resource. + description: APIResource represents a Kubernetes API resource. properties: group: description: Group of the resource. type: string kind: - description: Camel-cased singular name of the resource (e.g. - ConfigMap) + description: Kind of the resource. type: string pluralName: description: Lower-cased plural name of the resource (e.g. configmaps). If diff --git a/config/crds/core.kubeadmiral.io_federatedobjects.yaml b/config/crds/core.kubeadmiral.io_federatedobjects.yaml new file mode 100644 index 00000000..46110d16 --- /dev/null +++ b/config/crds/core.kubeadmiral.io_federatedobjects.yaml @@ -0,0 +1,228 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: federatedobjects.core.kubeadmiral.io +spec: + group: core.kubeadmiral.io + names: + kind: FederatedObject + listKind: FederatedObjectList + plural: federatedobjects + shortNames: + - fo + singular: federatedobject + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: FederatedObject describes a namespace-scoped Kubernetes object + and how it should be propagated to different member clusters. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired behavior of the FederatedObject. + properties: + follows: + description: Follows defines other objects, or "leaders", that the + Kubernetes object should follow during propagation, i.e. the Kubernetes + object should be propagated to all member clusters that its "leaders" + are placed in. + items: + description: LeaderReference contains the identifying metadata of + a "leader" Kubernetes object. 
+ properties: + group: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + type: array + overrides: + description: Overrides describe the overrides that should be applied + to the base template of the Kubernetes object before it is propagated + to individual member clusters. + items: + description: OverrideWithController describes the overrides that + will be applied to a Kubernetes object before it is propagated + to individual member clusters. + properties: + clusters: + description: Override is the list of member clusters and their + respective override patches. + items: + description: ClusterReferenceWithPatches represents a single + member cluster and a list of override patches for the cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + patches: + description: Patches is the list of override patches for + the member cluster. + items: + description: OverridePatch defines a JSON patch. + properties: + op: + type: string + path: + type: string + value: + x-kubernetes-preserve-unknown-fields: true + required: + - path + type: object + type: array + required: + - cluster + type: object + type: array + controller: + description: Controller identifies the controller responsible + for this override. + type: string + required: + - clusters + - controller + type: object + type: array + placements: + description: Placements describe the member clusters that the Kubernetes + object will be propagated to, which is a union of all the listed + clusters. + items: + description: PlacementWithController describes the member clusters + that a Kubernetes object should be propagated to. + properties: + controller: + description: Controller identifies the controller responsible + for this placement. + type: string + placement: + description: Placement is the list of member clusters that the + Kubernetes object should be propagated to. + items: + description: ClusterReference represents a single member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + required: + - cluster + type: object + type: array + required: + - controller + - placement + type: object + type: array + template: + description: Template is the base template of the Kubernetes object + to be propagated. + x-kubernetes-preserve-unknown-fields: true + required: + - follows + - overrides + - placements + - template + type: object + status: + description: Status describes the most recently observed status of the + FederatedObject. + properties: + clusters: + description: Clusters contains the propagation status of the Kubernetes + object for individual member clusters. + items: + description: PropagationStatus describes the propagation of a Kubernetes + object to a given member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + lastObservedGeneration: + description: LastObservedGeneration is the last observed generation + of the Kubernetes object in the member cluster. + format: int64 + type: integer + status: + description: Status describes the current status of propagating + the Kubernetes object to the member cluster. + type: string + required: + - cluster + - status + type: object + type: array + collisionCount: + description: CollisionCount can be used in conjunction with RevisionHistory + to implement rollbacks. 
+ format: int32 + type: integer + conditions: + description: Conditions describe the current state of this FederatedObject. + items: + description: GenericFederatedObjectCondition contains the current + details about a particular condition of a FederatedObject. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the status + of this condition changed. + format: date-time + type: string + lastUpdateTime: + description: LastUpdateTime is the last time a reconciliation + for this condition occurred. + format: date-time + type: string + reason: + description: Reason is the reason for the last status change + of this condition. + type: string + status: + description: Status is the status of the condition, one of True, + False or Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + syncedGeneration: + description: SyncedGeneration is the generation of this FederatedObject + when it was last synced to selected member clusters. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml b/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml index 44ed05d5..c38a577f 100644 --- a/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml +++ b/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml @@ -20,8 +20,8 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: FederatedTypeConfig is the Schema for the federatedtypeconfigs - API + description: FederatedTypeConfig specifies an API resource type to federate + and various type-specific options. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -38,54 +38,29 @@ spec: spec: properties: autoMigration: - description: Configurations for auto migration. + description: Configuration for AutoMigration. If left empty, the AutoMigration + feature will be disabled. properties: enabled: - description: Whether or not to enable auto migration. + description: Whether or not to automatically migrate unschedulable + pods to a different cluster. type: boolean required: - enabled type: object controllers: - description: The controllers that must run before the resource can - be propagated to member clusters. Each inner slice specifies a step. - Step T must complete before step T+1 can commence. Controllers within - each step can execute in parallel. + description: The controllers that must run before the source object + can be propagated to member clusters. Each inner slice specifies + a step. Step T must complete before step T+1 can commence. Controllers + within each step can execute in parallel. items: items: type: string type: array type: array - federatedType: - description: Configuration for the federated type that defines (via - template, placement and overrides fields) how the target type should - appear in multiple cluster. - properties: - group: - description: Group of the resource. - type: string - kind: - description: Camel-cased singular name of the resource (e.g. ConfigMap) - type: string - pluralName: - description: Lower-cased plural name of the resource (e.g. configmaps). If - not provided, it will be computed by lower-casing the kind and - suffixing an 's'. - type: string - scope: - description: Scope of the resource. - type: string - version: - description: Version of the resource. 
- type: string - required: - - kind - - pluralName - - scope - - version - type: object pathDefinition: - description: Defines the paths in the target object schema. + description: Defines the paths to various fields in the target object's + schema. properties: availableReplicasStatus: description: Path to a numeric field that reflects the number @@ -114,22 +89,35 @@ spec: type: string type: object revisionHistory: - description: Whether or not keep revisionHistory for the federatedType - resource - type: string + description: Configuration for RevisionHistory. If left empty, the + RevisionHistory feature will be disabled. + properties: + enabled: + description: Whether or not preserve a RevisionHistory for the + federated object during updates. + type: boolean + required: + - enabled + type: object rolloutPlan: - description: Whether or not to plan the rollout process - type: string + description: Configuration for RolloutPlan. If left empty, the RolloutPlan + feature will be disabled. + properties: + enabled: + description: Whether or not to synchronize the rollout process + across clusters. + type: boolean + required: + - enabled + type: object sourceType: - description: The configuration of the source type. If set, each object - of the source type will be federated to object of the federated - type with the same name and namespace. + description: The API resource type to be federated. properties: group: description: Group of the resource. type: string kind: - description: Camel-cased singular name of the resource (e.g. ConfigMap) + description: Kind of the resource. type: string pluralName: description: Lower-cased plural name of the resource (e.g. configmaps). If @@ -149,89 +137,37 @@ spec: - version type: object statusAggregation: - description: Whether or not Status should be aggregated to source - type object - type: string + description: Configuration for StatusAggregation. If left empty, the + StatusAggregation feature will be disabled. + properties: + enabled: + description: Whether or not to enable status aggregation. + type: boolean + required: + - enabled + type: object statusCollection: - description: Whether or not Status object should be populated. + description: Configuration for StatusCollection. If left empty, the + StatusCollection feature will be disabled. properties: + enabled: + description: Whether or not to enable status collection. + type: boolean fields: + description: Contains the fields to be collected during status + collection. Each field is a dot separated string that corresponds + to its path in the source object's schema. E.g. `metadata.creationTimestamp`. items: type: string type: array - type: object - statusType: - description: Configuration for the status type that holds information - about which type holds the status of the federated resource. If - not provided, the group and version will default to those provided - for the federated type api resource. - properties: - group: - description: Group of the resource. - type: string - kind: - description: Camel-cased singular name of the resource (e.g. ConfigMap) - type: string - pluralName: - description: Lower-cased plural name of the resource (e.g. configmaps). If - not provided, it will be computed by lower-casing the kind and - suffixing an 's'. - type: string - scope: - description: Scope of the resource. - type: string - version: - description: Version of the resource. 
- type: string - required: - - kind - - pluralName - - scope - - version - type: object - targetType: - description: The configuration of the target type. If not set, the - pluralName and groupName fields will be set from the metadata.name - of this resource. The kind field must be set. - properties: - group: - description: Group of the resource. - type: string - kind: - description: Camel-cased singular name of the resource (e.g. ConfigMap) - type: string - pluralName: - description: Lower-cased plural name of the resource (e.g. configmaps). If - not provided, it will be computed by lower-casing the kind and - suffixing an 's'. - type: string - scope: - description: Scope of the resource. - type: string - version: - description: Version of the resource. - type: string required: - - kind - - pluralName - - scope - - version + - enabled type: object required: - - federatedType - - targetType - type: object - status: - description: FederatedTypeConfigStatus defines the observed state of FederatedTypeConfig - properties: - observedGeneration: - description: ObservedGeneration is the generation as observed by the - controller consuming the FederatedTypeConfig. - format: int64 - type: integer - required: - - observedGeneration + - sourceType type: object + required: + - spec type: object served: true storage: true diff --git a/config/crds/core.kubeadmiral.io_propagationpolicies.yaml b/config/crds/core.kubeadmiral.io_propagationpolicies.yaml index a936c82e..780b1563 100644 --- a/config/crds/core.kubeadmiral.io_propagationpolicies.yaml +++ b/config/crds/core.kubeadmiral.io_propagationpolicies.yaml @@ -124,13 +124,13 @@ spec: description: DisableFollowerScheduling is a boolean that determines if follower scheduling is disabled. Resources that depend on other resources (e.g. deployments) are called leaders, and resources that are depended on (e.g. configmaps and secrets) are called followers. If a leader enables follower scheduling, its followers will additionally be scheduled to clusters where the leader is scheduled. type: boolean maxClusters: - description: MaxClusters is the maximum number of replicas that the federated object can be propagated to The maximum number of clusters is unbounded if no value is provided. + description: MaxClusters is the maximum number of replicas that the federated object can be propagated to. The maximum number of clusters is unbounded if no value is provided. format: int64 type: integer placement: - description: Placement is an explicit list of clusters used to select member clusters to propagate resources + description: Placement is an explicit list of clusters used to select member clusters to propagate resources to. items: - description: Placement describes a cluster that a federated object can be propagated to and its propagation preferences. + description: DesiredPlacement describes a cluster that a federated object can be propagated to and its propagation preferences. properties: cluster: description: Cluster is the name of the FederatedCluster to propagate to. @@ -180,7 +180,7 @@ spec: description: StickyCluster determines if a federated object can be rescheduled. type: boolean tolerations: - description: Tolerations describe a set of cluster taints that the policy tolerates + description: Tolerations describe a set of cluster taints that the policy tolerates. items: description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . 
properties: diff --git a/hack/generate-groups.sh b/hack/generate-groups.sh index 2696a43e..5d0fd12a 100644 --- a/hack/generate-groups.sh +++ b/hack/generate-groups.sh @@ -30,7 +30,6 @@ YQ_VERSION=${YQ_VERSION:-"v4.33.1"} MODULE_NAME=${MODULE_NAME:-"github.com/kubewharf/kubeadmiral"} groups=( core/v1alpha1 - types/v1alpha1 ) # install code-generator binaries diff --git a/hack/typos.toml b/hack/typos.toml index f878a8ac..f3299954 100644 --- a/hack/typos.toml +++ b/hack/typos.toml @@ -1,5 +1,9 @@ [files] -extend-exclude = ["go.mod", "go.sum"] +extend-exclude = ["go.mod", "go.sum", "config/crds/"] + +[default] +extend-ignore-re = ["shortName=[a-z]+"] [default.extend-identifiers] ANDed = "ANDed" + diff --git a/pkg/apis/core/v1alpha1/extensions_federatedobject.go b/pkg/apis/core/v1alpha1/extensions_federatedobject.go new file mode 100644 index 00000000..af48d8f5 --- /dev/null +++ b/pkg/apis/core/v1alpha1/extensions_federatedobject.go @@ -0,0 +1,117 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "reflect" + "sort" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Placement extensions + +// GetPlacementUnion returns the union of all clusters listed under the Placement field of the FederatedObject. +func (o *FederatedObject) GetPlacementUnion() sets.Set[string] { + set := sets.New[string]() + for _, placement := range o.Spec.Placements { + for _, cluster := range placement.Placement { + set.Insert(cluster.Cluster) + } + } + return set +} + +// GetControllerPlacement returns the slice containing all the ClusterPlacements from a given controller. Returns nil if +// the controller is not present. +func (o *FederatedObject) GetControllerPlacement(controller string) []ClusterReference { + for _, placement := range o.Spec.Placements { + if placement.Controller == controller { + return placement.Placement + } + } + return nil +} + +// SetControllerPlacement sets the ClusterPlacements for a given controller. If clusterNames is nil or empty, the previous +// placement for the given controller will be deleted. Returns a bool indicating if the FederatedObject has changed. 
+func (o *FederatedObject) SetControllerPlacement(controller string, clusterNames []string) bool { + if len(clusterNames) == 0 { + return o.DeleteControllerPlacement(controller) + } + + newPlacement := make([]ClusterReference, len(clusterNames)) + for i, name := range clusterNames { + newPlacement[i] = ClusterReference{Cluster: name} + } + // sort the clusters by name for readability and to avoid unnecessary updates + sort.Slice(newPlacement, func(i, j int) bool { + return newPlacement[i].Cluster < newPlacement[j].Cluster + }) + + oldPlacementWithControllerIdx := -1 + for i := range o.Spec.Placements { + if o.Spec.Placements[i].Controller == controller { + oldPlacementWithControllerIdx = i + break + } + } + + newPlacementWithController := PlacementWithController{ + Controller: controller, + Placement: newPlacement, + } + if oldPlacementWithControllerIdx == -1 { + o.Spec.Placements = append(o.Spec.Placements, newPlacementWithController) + return true + } + if !reflect.DeepEqual(newPlacementWithController, o.Spec.Placements[oldPlacementWithControllerIdx]) { + o.Spec.Placements[oldPlacementWithControllerIdx] = newPlacementWithController + return true + } + + return false +} + +// DeleteControllerPlacement deletes a controller's placement, returning a bool to indicate if the FederatedObject has +// changed. +func (o *FederatedObject) DeleteControllerPlacement(controller string) bool { + oldPlacementIdx := -1 + for i := range o.Spec.Placements { + if o.Spec.Placements[i].Controller == controller { + oldPlacementIdx = i + break + } + } + + if oldPlacementIdx == -1 { + return false + } + + o.Spec.Placements = append(o.Spec.Placements[:oldPlacementIdx], o.Spec.Placements[(oldPlacementIdx+1):]...) + return true +} + +// Follower extensions + +func (l *LeaderReference) GroupKind() schema.GroupKind { + return schema.GroupKind{ + Group: l.Group, + Kind: l.Kind, + } +} diff --git a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go index 901d0d3a..a2e3b3b5 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go @@ -33,63 +33,36 @@ func (f *FederatedTypeConfig) GetObjectMeta() metav1.ObjectMeta { return f.ObjectMeta } -func (f *FederatedTypeConfig) GetTargetType() metav1.APIResource { - return apiResourceToMeta(f.Spec.TargetType) -} - func (f *FederatedTypeConfig) GetNamespaced() bool { - return f.Spec.TargetType.Namespaced() + return f.Spec.SourceType.Namespaced() } func (f *FederatedTypeConfig) GetPropagationEnabled() bool { return true } -func (f *FederatedTypeConfig) GetFederatedType() metav1.APIResource { - return apiResourceToMeta(f.Spec.FederatedType) -} - -func (f *FederatedTypeConfig) GetStatusType() *metav1.APIResource { - if f.Spec.StatusType == nil { - return nil - } - metaAPIResource := apiResourceToMeta(*f.Spec.StatusType) - return &metaAPIResource -} - -func (f *FederatedTypeConfig) GetSourceType() *metav1.APIResource { - if f.Spec.SourceType == nil { - return nil - } - meta := apiResourceToMeta(*f.Spec.SourceType) - return &meta +func (f *FederatedTypeConfig) GetSourceType() metav1.APIResource { + return apiResourceToMeta(f.Spec.SourceType) } -func (f *FederatedTypeConfig) GetStatusEnabled() bool { +func (f *FederatedTypeConfig) GetStatusCollectionEnabled() bool { return f.Spec.StatusCollection != nil } func (f *FederatedTypeConfig) GetStatusAggregationEnabled() bool { - return f.Spec.StatusAggregation != nil && - *f.Spec.StatusAggregation ==
StatusAggregationEnabled + return f.Spec.StatusAggregation != nil && f.Spec.StatusAggregation.Enabled } func (f *FederatedTypeConfig) GetPolicyRcEnabled() bool { return true // TODO: should this be configurable? } -func (f *FederatedTypeConfig) GetFederateEnabled() bool { - return f.Spec.SourceType != nil -} - func (f *FederatedTypeConfig) GetRevisionHistoryEnabled() bool { - return f.Spec.RevisionHistory != nil && - *f.Spec.RevisionHistory == RevisionHistoryEnabled + return f.Spec.RevisionHistory != nil && f.Spec.RevisionHistory.Enabled } func (f *FederatedTypeConfig) GetRolloutPlanEnabled() bool { - return f.Spec.RolloutPlan != nil && - *f.Spec.RolloutPlan == RolloutPlanEnabled + return f.Spec.RolloutPlan != nil && f.Spec.RolloutPlan.Enabled } func (f *FederatedTypeConfig) GetControllers() [][]string { diff --git a/pkg/apis/core/v1alpha1/register.go b/pkg/apis/core/v1alpha1/register.go index 275677e9..41cde200 100644 --- a/pkg/apis/core/v1alpha1/register.go +++ b/pkg/apis/core/v1alpha1/register.go @@ -53,6 +53,12 @@ func init() { // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &FederatedObject{}, + &FederatedObjectList{}, + &ClusterFederatedObject{}, + &ClusterFederatedObjectList{}, + &CollectedStatus{}, + &CollectedStatusList{}, &FederatedCluster{}, &FederatedClusterList{}, &FederatedTypeConfig{}, diff --git a/pkg/apis/core/v1alpha1/types_collectedstatus.go b/pkg/apis/core/v1alpha1/types_collectedstatus.go new file mode 100644 index 00000000..ec59748b --- /dev/null +++ b/pkg/apis/core/v1alpha1/types_collectedstatus.go @@ -0,0 +1,95 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:validation:Required +// +kubebuilder:resource:path=collectedstatuses,shortName=cs,singular=collectedstatus +// +kubebuilder:object:root=true + +// CollectedStatus stores the collected fields of Kubernetes objects from member clusters, that are propagated by a +// FederatedObject. +type CollectedStatus struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + GenericCollectedStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// CollectedStatusList contains a list of CollectedStatuses. 
+type CollectedStatusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CollectedStatus `json:"items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:validation:Required +// +kubebuilder:resource:path=clustercollectedstatuses,shortName=ccs,singular=clustercollectedstatus,scope=Cluster +// +kubebuilder:object:root=true + +// ClusterCollectedStatus stores the collected fields of Kubernetes objects from member clusters, that are propagated by +// a ClusterFederatedObject. +type ClusterCollectedStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + GenericCollectedStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterCollectedStatusList contains a list of ClusterCollectedStatuses. +type ClusterCollectedStatusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterCollectedStatus `json:"items"` +} + +// GenericCollectedStatus contains the shared fields of CollectedStatus and ClusterCollectedStatus +type GenericCollectedStatus struct { + // Clusters is the list of member clusters and collected fields for its propagated Kubernetes object. + Clusters []CollectedFieldsWithCluster `json:"clusters"` + + // LastUpdateTime is the last time that a collection was performed. + LastUpdateTime metav1.Time `json:"lastUpdateTime"` +} + +// CollectedFieldsWithCluster stores the collected fields of a Kubernetes object in a member cluster. +type CollectedFieldsWithCluster struct { + // Cluster is the name of the member cluster. + Cluster string `json:"cluster"` + // CollectedFields is the set of fields collected for the Kubernetes object. + CollectedFields apiextensionsv1.JSON `json:"collectedFields"` + // Error records any errors encountered while collecting fields from the cluster. + // +optional + Error string `json:"error,omitempty"` +} diff --git a/pkg/apis/core/v1alpha1/types_federatedobject.go b/pkg/apis/core/v1alpha1/types_federatedobject.go new file mode 100644 index 00000000..2ceb348c --- /dev/null +++ b/pkg/apis/core/v1alpha1/types_federatedobject.go @@ -0,0 +1,254 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:validation:Required +// +kubebuilder:resource:path=federatedobjects,shortName=fo,singular=federatedobject +// +kubebuilder:subresource:status +// +kubebuilder:object:root=true + +// FederatedObject describes a namespace-scoped Kubernetes object and how it should be propagated to different member +// clusters.
+type FederatedObject struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired behavior of the FederatedObject. + Spec GenericFederatedObjectSpec `json:"spec"` + + // Status describes the most recently observed status of the FederatedObject. + // +optional + Status GenericFederatedObjectStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// FederatedObjectList contains a list of FederatedObject. +type FederatedObjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FederatedObject `json:"items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:validation:Required +// +kubebuilder:resource:path=clusterfederatedobjects,shortName=cfo,singular=clusterfederatedobject,scope=Cluster +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ClusterFederatedObject describes a cluster-scoped Kubernetes object and how it should be propagated to different +// member clusters. +type ClusterFederatedObject struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired behavior of the ClusterFederatedObject. + Spec GenericFederatedObjectSpec `json:"spec"` + + // Status describes the most recently observed status of the ClusterFederatedObject. + // +optional + Status GenericFederatedObjectStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterFederatedObjectList contains a list of ClusterFederatedObject. +type ClusterFederatedObjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterFederatedObject `json:"items"` +} + +// GenericFederatedObjectSpec defines the desired behavior of a FederatedObject or ClusterFederatedObject. +type GenericFederatedObjectSpec struct { + // Template is the base template of the Kubernetes object to be propagated. + Template apiextensionsv1.JSON `json:"template"` + // Overrides describe the overrides that should be applied to the base template of the Kubernetes object before it + // is propagated to individual member clusters. + Overrides []OverrideWithController `json:"overrides"` + // Placements describe the member clusters that the Kubernetes object will be propagated to, which is a union of all + // the listed clusters. + Placements []PlacementWithController `json:"placements"` + // Follows defines other objects, or "leaders", that the Kubernetes object should follow during propagation, i.e. + // the Kubernetes object should be propagated to all member clusters that its "leaders" are placed in. + Follows []LeaderReference `json:"follows"` +} + +// GenericFederatedObjectStatus describes the most recently observed status of a FederatedObject or ClusterFederatedObject. +type GenericFederatedObjectStatus struct { + // SyncedGeneration is the generation of this FederatedObject when it was last synced to selected member clusters. 
+ SyncedGeneration int64 `json:"syncedGeneration,omitempty"` + // Conditions describe the current state of this FederatedObject. + Conditions []GenericFederatedObjectCondition `json:"conditions,omitempty"` + // Clusters contains the propagation status of the Kubernetes object for individual member clusters. + Clusters []PropagationStatus `json:"clusters,omitempty"` + + // CollisionCount can be used in conjunction with RevisionHistory to implement rollbacks. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty"` +} + +// PlacementWithController describes the member clusters that a Kubernetes object should be propagated to. +type PlacementWithController struct { + // Controller identifies the controller responsible for this placement. + Controller string `json:"controller"` + // Placement is the list of member clusters that the Kubernetes object should be propagated to. + Placement []ClusterReference `json:"placement"` +} + +// ClusterReference represents a single member cluster. +type ClusterReference struct { + // Cluster is the name of the member cluster. + Cluster string `json:"cluster"` +} + +// OverrideWithController describes the overrides that will be applied to a Kubernetes object before it is propagated to +// individual member clusters. +type OverrideWithController struct { + // Controller identifies the controller responsible for this override. + Controller string `json:"controller"` + // Override is the list of member clusters and their respective override patches. + Override []ClusterReferenceWithPatches `json:"clusters"` +} + +// ClusterReferenceWithPatches represents a single member cluster and a list of override patches for the cluster. +type ClusterReferenceWithPatches struct { + // Cluster is the name of the member cluster. + Cluster string `json:"cluster"` + // Patches is the list of override patches for the member cluster. + Patches OverridePatches `json:"patches,omitempty"` +} + +// OverridePatch defines a JSON patch. +type OverridePatch struct { + Op string `json:"op,omitempty"` + Path string `json:"path"` + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + +// OverridePatches is a list of OverridePatch. +type OverridePatches []OverridePatch + +// LeaderReference contains the identifying metadata of a "leader" Kubernetes object. +type LeaderReference struct { + Group string `json:"group,omitempty"` + Kind string `json:"kind"` + Namespace string `json:"namespace,omitempty"` + Name string `json:"name"` +} + +// GenericFederatedObjectCondition contains the current details about a particular condition of a FederatedObject. +type GenericFederatedObjectCondition struct { + // Type is the type of the condition. + Type FederatedObjectConditionType `json:"type"` + // Status is the status of the condition, one of True, False or Unknown. + Status corev1.ConditionStatus `json:"status"` + // LastUpdateTime is the last time a reconciliation for this condition occurred. + // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` + // LastTransitionTime is the last time the status of this condition changed. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is the reason for the last status change of this condition. + // +optional + Reason FederatedObjectConditionReason `json:"reason,omitempty"` +} + +// PropagationStatus describes the propagation of a Kubernetes object to a given member cluster. +type PropagationStatus struct { + // Cluster is the name of the member cluster. 
+ Cluster string `json:"cluster"` + // Status describes the current status of propagating the Kubernetes object to the member cluster. + Status PropagationStatusType `json:"status"` + // LastObservedGeneration is the last observed generation of the Kubernetes object in the member cluster. + LastObservedGeneration int64 `json:"lastObservedGeneration,omitempty"` +} + +// FederatedObjectConditionType is a unique, camel-case word to describe the type of a FederatedObjectCondition. +type FederatedObjectConditionType string + +const ( + PropagationConditionType FederatedObjectConditionType = "Propagated" +) + +// FederatedObjectConditionReason is a unique, camel-case word to describe the reason for the last status change of a +// FederatedObjectCondition. +type FederatedObjectConditionReason string + +const ( + AggregateSuccess FederatedObjectConditionReason = "" + SyncRevisionsFailed FederatedObjectConditionReason = "SyncRevisionsFailed" + ClusterRetrievalFailed FederatedObjectConditionReason = "ClusterRetrievalFailed" + ComputePlacementFailed FederatedObjectConditionReason = "ComputePlacementFailed" + PlanRolloutFailed FederatedObjectConditionReason = "PlanRolloutFailed" + CheckClusters FederatedObjectConditionReason = "CheckClusters" + NamespaceNotFederated FederatedObjectConditionReason = "NamespaceNotFederated" + EnsureDeletionFailed FederatedObjectConditionReason = "EnsureDeletionFailed" +) + +// PropagationStatusType is a unique, camel-case word to describe the current status of propagating a Kubernetes object +// to a member cluster. +type PropagationStatusType string + +const ( + ClusterPropagationOK PropagationStatusType = "OK" + WaitingForRemoval PropagationStatusType = "WaitingForRemoval" + + // Cluster-specific errors + + ClusterNotReady PropagationStatusType = "ClusterNotReady" + ClusterTerminating PropagationStatusType = "ClusterTerminating" + CachedRetrievalFailed PropagationStatusType = "CachedRetrievalFailed" + ComputeResourceFailed PropagationStatusType = "ComputeResourceFailed" + ApplyOverridesFailed PropagationStatusType = "ApplyOverridesFailed" + CreationFailed PropagationStatusType = "CreationFailed" + UpdateFailed PropagationStatusType = "UpdateFailed" + DeletionFailed PropagationStatusType = "DeletionFailed" + LabelRemovalFailed PropagationStatusType = "LabelRemovalFailed" + RetrievalFailed PropagationStatusType = "RetrievalFailed" + AlreadyExists PropagationStatusType = "AlreadyExists" + FieldRetentionFailed PropagationStatusType = "FieldRetentionFailed" + SetLastReplicasetNameFailed PropagationStatusType = "SetLastReplicasetNameFailed" + VersionRetrievalFailed PropagationStatusType = "VersionRetrievalFailed" + ClientRetrievalFailed PropagationStatusType = "ClientRetrievalFailed" + ManagedLabelFalse PropagationStatusType = "ManagedLabelFalse" + FinalizerCheckFailed PropagationStatusType = "FinalizerCheckFailed" + + // Operation timeout errors + + CreationTimedOut PropagationStatusType = "CreationTimedOut" + UpdateTimedOut PropagationStatusType = "UpdateTimedOut" + DeletionTimedOut PropagationStatusType = "DeletionTimedOut" + LabelRemovalTimedOut PropagationStatusType = "LabelRemovalTimedOut" +) diff --git a/pkg/apis/core/v1alpha1/types_federatedtypeconfig.go b/pkg/apis/core/v1alpha1/types_federatedtypeconfig.go index 5847bff8..641622f4 100644 --- a/pkg/apis/core/v1alpha1/types_federatedtypeconfig.go +++ b/pkg/apis/core/v1alpha1/types_federatedtypeconfig.go @@ -25,30 +25,19 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ( - StatusAggregationEnabled 
StatusAggregationMode = "Enabled" - StatusAggregationDisabled StatusAggregationMode = "Disabled" - - RevisionHistoryEnabled RevisionHistoryMode = "Enabled" - RevisionHistoryDisabled RevisionHistoryMode = "Disabled" - - RolloutPlanEnabled RolloutPlanMode = "Enabled" - RolloutPlanDisabled RolloutPlanMode = "Disabled" -) - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:validation:Required // +kubebuilder:resource:path=federatedtypeconfigs,shortName=ftc,scope=Cluster // +kubebuilder:subresource:status -// FederatedTypeConfig is the Schema for the federatedtypeconfigs API +// FederatedTypeConfig specifies an API resource type to federate and various type-specific options. type FederatedTypeConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec FederatedTypeConfigSpec `json:"spec,omitempty"` - Status FederatedTypeConfigStatus `json:"status,omitempty"` + Spec FederatedTypeConfigSpec `json:"spec"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -61,53 +50,37 @@ type FederatedTypeConfigList struct { } type FederatedTypeConfigSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // The configuration of the source type. If set, each object of the source - // type will be federated to object of the federated type with the same name - // and namespace. - SourceType *APIResource `json:"sourceType,omitempty"` - // The configuration of the target type. If not set, the pluralName and - // groupName fields will be set from the metadata.name of this resource. The - // kind field must be set. - TargetType APIResource `json:"targetType"` - // Configuration for the federated type that defines (via - // template, placement and overrides fields) how the target type - // should appear in multiple cluster. - FederatedType APIResource `json:"federatedType"` - // Configuration for the status type that holds information about which type - // holds the status of the federated resource. If not provided, the group - // and version will default to those provided for the federated type api - // resource. - // +optional - StatusType *APIResource `json:"statusType,omitempty"` + // The API resource type to be federated. + SourceType APIResource `json:"sourceType"` - // Whether or not Status object should be populated. + // Configuration for StatusAggregation. If left empty, the StatusAggregation feature will be disabled. // +optional - StatusCollection *StatusCollection `json:"statusCollection,omitempty"` - // Whether or not Status should be aggregated to source type object - StatusAggregation *StatusAggregationMode `json:"statusAggregation,omitempty"` - // Whether or not keep revisionHistory for the federatedType resource - RevisionHistory *RevisionHistoryMode `json:"revisionHistory,omitempty"` - // Whether or not to plan the rollout process + StatusAggregation *StatusAggregationConfig `json:"statusAggregation,omitempty"` + // Configuration for RevisionHistory. If left empty, the RevisionHistory feature will be disabled. // +optional - RolloutPlan *RolloutPlanMode `json:"rolloutPlan,omitempty"` - // Configurations for auto migration. + RevisionHistory *RevisionHistoryConfig `json:"revisionHistory,omitempty"` + // Configuration for RolloutPlan. If left empty, the RolloutPlan feature will be disabled. 
+	// +optional
+	RolloutPlan *RolloutPlanConfig `json:"rolloutPlan,omitempty"`
+	// Configuration for StatusCollection. If left empty, the StatusCollection feature will be disabled.
+	// +optional
+	StatusCollection *StatusCollectionConfig `json:"statusCollection,omitempty"`
+	// Configuration for AutoMigration. If left empty, the AutoMigration feature will be disabled.
 	// +optional
 	AutoMigration *AutoMigrationConfig `json:"autoMigration,omitempty"`
 
-	// The controllers that must run before the resource can be propagated to member clusters.
+	// The controllers that must run before the source object can be propagated to member clusters.
 	// Each inner slice specifies a step. Step T must complete before step T+1 can commence.
 	// Controllers within each step can execute in parallel.
 	// +optional
 	Controllers [][]string `json:"controllers,omitempty"`
 
-	// Defines the paths in the target object schema.
+	// Defines the paths to various fields in the source object's schema.
 	// +optional
 	PathDefinition PathDefinition `json:"pathDefinition,omitempty"`
 }
 
+// PathDefinition contains paths to various fields in the source object that are required by controllers.
 type PathDefinition struct {
 	// Path to a metav1.LabelSelector field that selects the replicas for this object.
 	// E.g. `spec.selector` for Deployment and ReplicaSet.
@@ -135,47 +108,51 @@ type PathDefinition struct {
 	ReadyReplicasStatus string `json:"readyReplicasStatus,omitempty"`
 }
 
-// FederatedTypeConfigStatus defines the observed state of FederatedTypeConfig
-type FederatedTypeConfigStatus struct {
-	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
-	// Important: Run "make" to regenerate code after modifying this file
-
-	// ObservedGeneration is the generation as observed by the controller consuming the FederatedTypeConfig.
-	ObservedGeneration int64 `json:"observedGeneration"`
-}
-
-// StatusCollection defines the fields that the status controller needs to collect
-type StatusCollection struct {
+// StatusCollectionConfig defines the configuration for the StatusCollection feature.
+type StatusCollectionConfig struct {
+	// Whether or not to enable status collection.
+	Enabled bool `json:"enabled"`
+	// Contains the fields to be collected during status collection. Each field is a dot-separated string that
+	// corresponds to its path in the source object's schema.
+	// E.g. `metadata.creationTimestamp`.
 	Fields []string `json:"fields,omitempty"`
 }
 
-// StatusAggregationMode defines the state of status aggregation.
-type StatusAggregationMode string
+// StatusAggregationConfig defines the configuration for the StatusAggregation feature.
+type StatusAggregationConfig struct {
+	// Whether or not to enable status aggregation.
+	Enabled bool `json:"enabled"`
+}
 
-type RevisionHistoryMode string
+// RevisionHistoryConfig defines the configuration for the RevisionHistory feature.
+type RevisionHistoryConfig struct {
+	// Whether or not to preserve a RevisionHistory for the federated object during updates.
+	Enabled bool `json:"enabled"`
+}
 
-type RolloutPlanMode string
+// RolloutPlanConfig defines the configuration for the RolloutPlan feature.
+type RolloutPlanConfig struct {
+	// Whether or not to synchronize the rollout process across clusters.
+	Enabled bool `json:"enabled"`
+}
 
+// AutoMigrationConfig defines the configuration for the AutoMigration feature.
 type AutoMigrationConfig struct {
-	// Whether or not to enable auto migration.
+	// Whether or not to automatically migrate unschedulable pods to a different cluster.
 	Enabled bool `json:"enabled"`
 }
 
-// APIResource defines how to configure the dynamic client for an API resource.
+// APIResource represents a Kubernetes API resource.
 type APIResource struct {
-	// metav1.GroupVersion is not used since the json annotation of
-	// the fields enforces them as mandatory.
-
 	// Group of the resource.
 	// +optional
 	Group string `json:"group,omitempty"`
 	// Version of the resource.
 	Version string `json:"version"`
-	// Camel-cased singular name of the resource (e.g. ConfigMap)
+	// Kind of the resource.
 	Kind string `json:"kind"`
-	// Lower-cased plural name of the resource (e.g. configmaps). If
-	// not provided, it will be computed by lower-casing the kind and
-	// suffixing an 's'.
+	// Lower-cased plural name of the resource (e.g. configmaps). If not provided,
+	// it will be computed by lower-casing the kind and suffixing an 's'.
 	PluralName string `json:"pluralName"`
 	// Scope of the resource.
 	Scope apiextv1beta1.ResourceScope `json:"scope"`
diff --git a/pkg/apis/core/v1alpha1/types_propagationpolicy.go b/pkg/apis/core/v1alpha1/types_propagationpolicy.go
index 0055eb41..5773cb86 100644
--- a/pkg/apis/core/v1alpha1/types_propagationpolicy.go
+++ b/pkg/apis/core/v1alpha1/types_propagationpolicy.go
@@ -78,17 +78,17 @@ type PropagationPolicySpec struct {
 	// An empty or nil ClusterAffinity selects everything.
 	// +optional
 	ClusterAffinity []ClusterSelectorTerm `json:"clusterAffinity,omitempty"`
-	// Tolerations describe a set of cluster taints that the policy tolerates
+	// Tolerations describe a set of cluster taints that the policy tolerates.
 	// +optional
 	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-	// MaxClusters is the maximum number of replicas that the federated object can be propagated to
+	// MaxClusters is the maximum number of clusters that the federated object can be propagated to.
 	// The maximum number of clusters is unbounded if no value is provided.
 	// +optional
 	MaxClusters *int64 `json:"maxClusters,omitempty"`
-	// Placement is an explicit list of clusters used to select member clusters to propagate resources
+	// Placement is an explicit list of clusters used to select member clusters to propagate resources to.
 	// +optional
-	Placements []Placement `json:"placement,omitempty"`
+	Placements []DesiredPlacement `json:"placement,omitempty"`
 
 	// DisableFollowerScheduling is a boolean that determines if follower scheduling is disabled.
 	// Resources that depend on other resources (e.g. deployments) are called leaders,
@@ -118,14 +118,14 @@ type PropagationPolicyStatus struct {
 type SchedulingMode string
 
 const (
-	// Duplicate mode means the federated object will be duplicated to member clusters
+	// Duplicate mode means the federated object will be duplicated to member clusters.
 	SchedulingModeDuplicate SchedulingMode = "Duplicate"
 
-	// Divide mode means the federated object's replicas will be divided between member clusters
+	// Divide mode means the federated object's replicas will be divided between member clusters.
 	SchedulingModeDivide SchedulingMode = "Divide"
 )
 
-// Placement describes a cluster that a federated object can be propagated to and its propagation preferences.
-type Placement struct {
+// DesiredPlacement describes a cluster that a federated object can be propagated to and its propagation preferences.
+type DesiredPlacement struct {
 	// Cluster is the name of the FederatedCluster to propagate to.
 	Cluster string `json:"cluster"`
 	// Preferences contains the cluster's propagation preferences.
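The hunks above define the whole surface of the unified API: a FederatedObject/ClusterFederatedObject pair whose spec carries the source object as raw JSON plus per-controller placements and overrides, and a slimmed-down FederatedTypeConfig keyed only by SourceType. For orientation, here is a minimal sketch (not part of this patch) of how the pieces compose; the controller names, cluster names, and object names are hypothetical placeholders, and the FederatedObject naming scheme shown is illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
)

func main() {
	// Register apps/v1 Deployments for federation. Under the unified model,
	// only SourceType is needed; the old targetType/federatedType pair is gone.
	ftc := &fedcorev1a1.FederatedTypeConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "deployments.apps"},
		Spec: fedcorev1a1.FederatedTypeConfigSpec{
			SourceType: fedcorev1a1.APIResource{
				Group:      "apps",
				Version:    "v1",
				Kind:       "Deployment",
				PluralName: "deployments",
				Scope:      apiextv1beta1.NamespaceScoped,
			},
			// Per-feature configs replace the old mode enums.
			RevisionHistory: &fedcorev1a1.RevisionHistoryConfig{Enabled: true},
		},
	}

	// The source object travels inside the FederatedObject as raw JSON.
	template, _ := json.Marshal(&appsv1.Deployment{
		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
		ObjectMeta: metav1.ObjectMeta{Name: "nginx", Namespace: "default"},
	})

	fo := &fedcorev1a1.FederatedObject{
		ObjectMeta: metav1.ObjectMeta{Name: "nginx-deployments.apps", Namespace: "default"},
		Spec: fedcorev1a1.GenericFederatedObjectSpec{
			Template: apiextensionsv1.JSON{Raw: template},
			// Placements are recorded per controller; the effective placement
			// is the union across all entries.
			Placements: []fedcorev1a1.PlacementWithController{{
				Controller: "example.io/scheduler", // hypothetical controller name
				Placement: []fedcorev1a1.ClusterReference{
					{Cluster: "member-1"},
					{Cluster: "member-2"},
				},
			}},
			// Overrides are per-cluster JSON patches applied to the template
			// before propagation.
			Overrides: []fedcorev1a1.OverrideWithController{{
				Controller: "example.io/override-controller", // hypothetical controller name
				Override: []fedcorev1a1.ClusterReferenceWithPatches{{
					Cluster: "member-2",
					Patches: fedcorev1a1.OverridePatches{
						{Op: "replace", Path: "/spec/replicas", Value: apiextensionsv1.JSON{Raw: []byte(`3`)}},
					},
				}},
			}},
		},
	}

	fmt.Println(ftc.Name, fo.Name)
}
```

Keying Placements and Overrides by controller is what lets the scheduler, the override controllers, and follower scheduling each own a disjoint slice of the same object without clobbering one another; consumers compute the effective placement as the union across entries.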
diff --git a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go index 13f5f4a1..1226a6c0 100644 --- a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -81,6 +81,66 @@ func (in *AutoMigrationTrigger) DeepCopy() *AutoMigrationTrigger { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCollectedStatus) DeepCopyInto(out *ClusterCollectedStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.GenericCollectedStatus.DeepCopyInto(&out.GenericCollectedStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCollectedStatus. +func (in *ClusterCollectedStatus) DeepCopy() *ClusterCollectedStatus { + if in == nil { + return nil + } + out := new(ClusterCollectedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCollectedStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCollectedStatusList) DeepCopyInto(out *ClusterCollectedStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCollectedStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCollectedStatusList. +func (in *ClusterCollectedStatusList) DeepCopy() *ClusterCollectedStatusList { + if in == nil { + return nil + } + out := new(ClusterCollectedStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCollectedStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { *out = *in @@ -99,6 +159,67 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterFederatedObject) DeepCopyInto(out *ClusterFederatedObject) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFederatedObject. +func (in *ClusterFederatedObject) DeepCopy() *ClusterFederatedObject { + if in == nil { + return nil + } + out := new(ClusterFederatedObject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterFederatedObject) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterFederatedObjectList) DeepCopyInto(out *ClusterFederatedObjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterFederatedObject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterFederatedObjectList. +func (in *ClusterFederatedObjectList) DeepCopy() *ClusterFederatedObjectList { + if in == nil { + return nil + } + out := new(ClusterFederatedObjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterFederatedObjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterObjectVersion) DeepCopyInto(out *ClusterObjectVersion) { *out = *in @@ -297,6 +418,45 @@ func (in *ClusterPropagationPolicyList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterReference) DeepCopyInto(out *ClusterReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReference. +func (in *ClusterReference) DeepCopy() *ClusterReference { + if in == nil { + return nil + } + out := new(ClusterReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterReferenceWithPatches) DeepCopyInto(out *ClusterReferenceWithPatches) { + *out = *in + if in.Patches != nil { + in, out := &in.Patches, &out.Patches + *out = make(OverridePatches, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReferenceWithPatches. +func (in *ClusterReferenceWithPatches) DeepCopy() *ClusterReferenceWithPatches { + if in == nil { + return nil + } + out := new(ClusterReferenceWithPatches) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSelectorRequirement) DeepCopyInto(out *ClusterSelectorRequirement) { *out = *in @@ -348,6 +508,100 @@ func (in *ClusterSelectorTerm) DeepCopy() *ClusterSelectorTerm { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectedFieldsWithCluster) DeepCopyInto(out *CollectedFieldsWithCluster) { + *out = *in + in.CollectedFields.DeepCopyInto(&out.CollectedFields) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectedFieldsWithCluster. 
+func (in *CollectedFieldsWithCluster) DeepCopy() *CollectedFieldsWithCluster { + if in == nil { + return nil + } + out := new(CollectedFieldsWithCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectedStatus) DeepCopyInto(out *CollectedStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.GenericCollectedStatus.DeepCopyInto(&out.GenericCollectedStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectedStatus. +func (in *CollectedStatus) DeepCopy() *CollectedStatus { + if in == nil { + return nil + } + out := new(CollectedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CollectedStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectedStatusList) DeepCopyInto(out *CollectedStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CollectedStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectedStatusList. +func (in *CollectedStatusList) DeepCopy() *CollectedStatusList { + if in == nil { + return nil + } + out := new(CollectedStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CollectedStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DesiredPlacement) DeepCopyInto(out *DesiredPlacement) { + *out = *in + in.Preferences.DeepCopyInto(&out.Preferences) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DesiredPlacement. +func (in *DesiredPlacement) DeepCopy() *DesiredPlacement { + if in == nil { + return nil + } + out := new(DesiredPlacement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FederatedCluster) DeepCopyInto(out *FederatedCluster) { *out = *in @@ -462,13 +716,73 @@ func (in *FederatedClusterStatus) DeepCopy() *FederatedClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedObject) DeepCopyInto(out *FederatedObject) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedObject. 
+func (in *FederatedObject) DeepCopy() *FederatedObject { + if in == nil { + return nil + } + out := new(FederatedObject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FederatedObject) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedObjectList) DeepCopyInto(out *FederatedObjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FederatedObject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedObjectList. +func (in *FederatedObjectList) DeepCopy() *FederatedObjectList { + if in == nil { + return nil + } + out := new(FederatedObjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FederatedObjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FederatedTypeConfig) DeepCopyInto(out *FederatedTypeConfig) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status return } @@ -526,38 +840,27 @@ func (in *FederatedTypeConfigList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FederatedTypeConfigSpec) DeepCopyInto(out *FederatedTypeConfigSpec) { *out = *in - if in.SourceType != nil { - in, out := &in.SourceType, &out.SourceType - *out = new(APIResource) - **out = **in - } - out.TargetType = in.TargetType - out.FederatedType = in.FederatedType - if in.StatusType != nil { - in, out := &in.StatusType, &out.StatusType - *out = new(APIResource) - **out = **in - } - if in.StatusCollection != nil { - in, out := &in.StatusCollection, &out.StatusCollection - *out = new(StatusCollection) - (*in).DeepCopyInto(*out) - } + out.SourceType = in.SourceType if in.StatusAggregation != nil { in, out := &in.StatusAggregation, &out.StatusAggregation - *out = new(StatusAggregationMode) + *out = new(StatusAggregationConfig) **out = **in } if in.RevisionHistory != nil { in, out := &in.RevisionHistory, &out.RevisionHistory - *out = new(RevisionHistoryMode) + *out = new(RevisionHistoryConfig) **out = **in } if in.RolloutPlan != nil { in, out := &in.RolloutPlan, &out.RolloutPlan - *out = new(RolloutPlanMode) + *out = new(RolloutPlanConfig) **out = **in } + if in.StatusCollection != nil { + in, out := &in.StatusCollection, &out.StatusCollection + *out = new(StatusCollectionConfig) + (*in).DeepCopyInto(*out) + } if in.AutoMigration != nil { in, out := &in.AutoMigration, &out.AutoMigration *out = new(AutoMigrationConfig) @@ -589,17 +892,112 @@ func (in *FederatedTypeConfigSpec) DeepCopy() *FederatedTypeConfigSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FederatedTypeConfigStatus) DeepCopyInto(out *FederatedTypeConfigStatus) { +func (in *GenericCollectedStatus) DeepCopyInto(out *GenericCollectedStatus) { + *out = *in + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]CollectedFieldsWithCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericCollectedStatus. +func (in *GenericCollectedStatus) DeepCopy() *GenericCollectedStatus { + if in == nil { + return nil + } + out := new(GenericCollectedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericFederatedObjectCondition) DeepCopyInto(out *GenericFederatedObjectCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFederatedObjectCondition. +func (in *GenericFederatedObjectCondition) DeepCopy() *GenericFederatedObjectCondition { + if in == nil { + return nil + } + out := new(GenericFederatedObjectCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericFederatedObjectSpec) DeepCopyInto(out *GenericFederatedObjectSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]OverrideWithController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Placements != nil { + in, out := &in.Placements, &out.Placements + *out = make([]PlacementWithController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Follows != nil { + in, out := &in.Follows, &out.Follows + *out = make([]LeaderReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFederatedObjectSpec. +func (in *GenericFederatedObjectSpec) DeepCopy() *GenericFederatedObjectSpec { + if in == nil { + return nil + } + out := new(GenericFederatedObjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericFederatedObjectStatus) DeepCopyInto(out *GenericFederatedObjectStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]GenericFederatedObjectCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]PropagationStatus, len(*in)) + copy(*out, *in) + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedTypeConfigStatus. -func (in *FederatedTypeConfigStatus) DeepCopy() *FederatedTypeConfigStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFederatedObjectStatus. 
+func (in *GenericFederatedObjectStatus) DeepCopy() *GenericFederatedObjectStatus { if in == nil { return nil } - out := new(FederatedTypeConfigStatus) + out := new(GenericFederatedObjectStatus) in.DeepCopyInto(out) return out } @@ -665,6 +1063,22 @@ func (in *JsonPatchOverrider) DeepCopy() *JsonPatchOverrider { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderReference) DeepCopyInto(out *LeaderReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderReference. +func (in *LeaderReference) DeepCopy() *LeaderReference { + if in == nil { + return nil + } + out := new(LeaderReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalSecretReference) DeepCopyInto(out *LocalSecretReference) { *out = *in @@ -681,6 +1095,45 @@ func (in *LocalSecretReference) DeepCopy() *LocalSecretReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridePatch) DeepCopyInto(out *OverridePatch) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridePatch. +func (in *OverridePatch) DeepCopy() *OverridePatch { + if in == nil { + return nil + } + out := new(OverridePatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OverridePatches) DeepCopyInto(out *OverridePatches) { + { + in := &in + *out = make(OverridePatches, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridePatches. +func (in OverridePatches) DeepCopy() OverridePatches { + if in == nil { + return nil + } + out := new(OverridePatches) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OverridePolicy) DeepCopyInto(out *OverridePolicy) { *out = *in @@ -785,6 +1238,29 @@ func (in *OverrideRule) DeepCopy() *OverrideRule { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideWithController) DeepCopyInto(out *OverrideWithController) { + *out = *in + if in.Override != nil { + in, out := &in.Override, &out.Override + *out = make([]ClusterReferenceWithPatches, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideWithController. +func (in *OverrideWithController) DeepCopy() *OverrideWithController { + if in == nil { + return nil + } + out := new(OverrideWithController) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Overriders) DeepCopyInto(out *Overriders) { *out = *in @@ -825,18 +1301,22 @@ func (in *PathDefinition) DeepCopy() *PathDefinition { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Placement) DeepCopyInto(out *Placement) { +func (in *PlacementWithController) DeepCopyInto(out *PlacementWithController) { *out = *in - in.Preferences.DeepCopyInto(&out.Preferences) + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = make([]ClusterReference, len(*in)) + copy(*out, *in) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. -func (in *Placement) DeepCopy() *Placement { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementWithController. +func (in *PlacementWithController) DeepCopy() *PlacementWithController { if in == nil { return nil } - out := new(Placement) + out := new(PlacementWithController) in.DeepCopyInto(out) return out } @@ -1118,7 +1598,7 @@ func (in *PropagationPolicySpec) DeepCopyInto(out *PropagationPolicySpec) { } if in.Placements != nil { in, out := &in.Placements, &out.Placements - *out = make([]Placement, len(*in)) + *out = make([]DesiredPlacement, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1163,6 +1643,22 @@ func (in *PropagationPolicyStatus) DeepCopy() *PropagationPolicyStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropagationStatus) DeepCopyInto(out *PropagationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagationStatus. +func (in *PropagationStatus) DeepCopy() *PropagationStatus { + if in == nil { + return nil + } + out := new(PropagationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaRescheduling) DeepCopyInto(out *ReplicaRescheduling) { *out = *in @@ -1214,6 +1710,38 @@ func (in *Resources) DeepCopy() *Resources { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RevisionHistoryConfig) DeepCopyInto(out *RevisionHistoryConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevisionHistoryConfig. +func (in *RevisionHistoryConfig) DeepCopy() *RevisionHistoryConfig { + if in == nil { + return nil + } + out := new(RevisionHistoryConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutPlanConfig) DeepCopyInto(out *RolloutPlanConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutPlanConfig. +func (in *RolloutPlanConfig) DeepCopy() *RolloutPlanConfig { + if in == nil { + return nil + } + out := new(RolloutPlanConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchedulerPluginWebhookConfiguration) DeepCopyInto(out *SchedulerPluginWebhookConfiguration) { *out = *in @@ -1390,7 +1918,23 @@ func (in *SchedulingProfileSpec) DeepCopy() *SchedulingProfileSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StatusCollection) DeepCopyInto(out *StatusCollection) {
+func (in *StatusAggregationConfig) DeepCopyInto(out *StatusAggregationConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusAggregationConfig.
+func (in *StatusAggregationConfig) DeepCopy() *StatusAggregationConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(StatusAggregationConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatusCollectionConfig) DeepCopyInto(out *StatusCollectionConfig) {
 	*out = *in
 	if in.Fields != nil {
 		in, out := &in.Fields, &out.Fields
@@ -1400,12 +1944,12 @@ func (in *StatusCollection) DeepCopyInto(out *StatusCollection) {
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCollection.
-func (in *StatusCollection) DeepCopy() *StatusCollection {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCollectionConfig.
+func (in *StatusCollectionConfig) DeepCopy() *StatusCollectionConfig {
 	if in == nil {
 		return nil
 	}
-	out := new(StatusCollection)
+	out := new(StatusCollectionConfig)
 	in.DeepCopyInto(out)
 	return out
 }
diff --git a/pkg/apis/schedulerwebhook/v1alpha1/types.go b/pkg/apis/schedulerwebhook/v1alpha1/types.go
index e58013d2..3832313f 100644
--- a/pkg/apis/schedulerwebhook/v1alpha1/types.go
+++ b/pkg/apis/schedulerwebhook/v1alpha1/types.go
@@ -62,7 +62,7 @@ type SchedulingUnit struct {
 	// MaxClusters is the max clusters set in the PropagationPolicy.
 	MaxClusters *int64 `json:"maxClusters,omitempty"`
 	// Placements is the placements set in the PropagationPolicy.
-	Placements []fedcorev1a1.Placement `json:"placements,omitempty"`
+	Placements []fedcorev1a1.DesiredPlacement `json:"placements,omitempty"`
 }
 
 type FilterRequest struct {
diff --git a/pkg/apis/types/register.go b/pkg/apis/types/register.go
deleted file mode 100644
index a7dcef5b..00000000
--- a/pkg/apis/types/register.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Copyright 2023 The KubeAdmiral Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package types
-
-const GroupName = "types.kubeadmiral.io"
diff --git a/pkg/apis/types/v1alpha1/doc.go b/pkg/apis/types/v1alpha1/doc.go
deleted file mode 100644
index 7398f16f..00000000
--- a/pkg/apis/types/v1alpha1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2023 The KubeAdmiral Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -// +k8s:deepcopy-gen=package -// +groupName=types.kubeadmiral.io - -package v1alpha1 diff --git a/pkg/apis/types/v1alpha1/extensions_follower.go b/pkg/apis/types/v1alpha1/extensions_follower.go deleted file mode 100644 index 7fb7a832..00000000 --- a/pkg/apis/types/v1alpha1/extensions_follower.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -) - -func SetFollows(uns *unstructured.Unstructured, leaders []LeaderReference) error { - leaderMaps := make([]interface{}, 0, len(leaders)) - for _, leader := range leaders { - leaderMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&leader) - if err != nil { - return err - } - leaderMaps = append(leaderMaps, leaderMap) - } - - return unstructured.SetNestedSlice(uns.Object, leaderMaps, common.FollowsPath...) -} - -func (l *LeaderReference) GroupKind() schema.GroupKind { - return schema.GroupKind{ - Group: l.Group, - Kind: l.Kind, - } -} diff --git a/pkg/apis/types/v1alpha1/extensions_placements.go b/pkg/apis/types/v1alpha1/extensions_placements.go deleted file mode 100644 index e5a0ec77..00000000 --- a/pkg/apis/types/v1alpha1/extensions_placements.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "reflect" - "sort" -) - -func (object *GenericObjectWithPlacements) ClusterNameUnion() map[string]struct{} { - set := map[string]struct{}{} - for _, placement := range object.Spec.Placements { - for _, cluster := range placement.Placement.Clusters { - set[cluster.Name] = struct{}{} - } - } - - return set -} - -func (spec *GenericSpecWithPlacements) GetPlacementOrNil(controller string) *Placement { - for i := range spec.Placements { - placement := &spec.Placements[i] - if placement.Controller == controller { - return &placement.Placement - } - } - - return nil -} - -func (spec *GenericSpecWithPlacements) GetOrCreatePlacement(controller string) *Placement { - for i := range spec.Placements { - placement := &spec.Placements[i] - if placement.Controller == controller { - return &placement.Placement - } - } - - spec.Placements = append(spec.Placements, PlacementWithController{ - Controller: controller, - }) - return &spec.Placements[len(spec.Placements)-1].Placement -} - -func (spec *GenericSpecWithPlacements) DeletePlacement(controller string) (hasChange bool) { - index := -1 - for i, placement := range spec.Placements { - if placement.Controller == controller { - index = i - break - } - } - - if index == -1 { - return false - } - - spec.Placements = append(spec.Placements[:index], spec.Placements[(index+1):]...) - - return true -} - -func (spec *GenericSpecWithPlacements) SetPlacementNames(controller string, newClusterNames map[string]struct{}) (hasChange bool) { - if len(newClusterNames) == 0 { - return spec.DeletePlacement(controller) - } - - placement := spec.GetOrCreatePlacement(controller) - oldClusterNames := placement.ClusterNames() - - if !reflect.DeepEqual(newClusterNames, oldClusterNames) { - placement.Clusters = nil - - // write the clusters in ascending order for better readability - sortedClusterNames := make([]string, 0, len(newClusterNames)) - for clusterName := range newClusterNames { - sortedClusterNames = append(sortedClusterNames, clusterName) - } - sort.Strings(sortedClusterNames) - for _, name := range sortedClusterNames { - placement.Clusters = append(placement.Clusters, GenericClusterReference{Name: name}) - } - - return true - } - - return false -} - -func (spec *Placement) ClusterNames() map[string]struct{} { - set := map[string]struct{}{} - - for _, cluster := range spec.Clusters { - set[cluster.Name] = struct{}{} - } - - return set -} - -func (spec *Placement) SetClusterNames(names []string) { - spec.Clusters = nil - for _, name := range names { - spec.Clusters = append(spec.Clusters, GenericClusterReference{Name: name}) - } -} diff --git a/pkg/apis/types/v1alpha1/register.go b/pkg/apis/types/v1alpha1/register.go deleted file mode 100644 index 39048c4d..00000000 --- a/pkg/apis/types/v1alpha1/register.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kubewharf/kubeadmiral/pkg/apis/types" -) - -// GroupVersion is the identifier for the API which includes -// the name of the group and the version of the API -var SchemeGroupVersion = schema.GroupVersion{ - Group: types.GroupName, - Version: "v1alpha1", -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &FederatedDeployment{}, - &FederatedDeploymentList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/pkg/apis/types/v1alpha1/types_federateddeployment.go b/pkg/apis/types/v1alpha1/types_federateddeployment.go deleted file mode 100644 index 63333b42..00000000 --- a/pkg/apis/types/v1alpha1/types_federateddeployment.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +kubebuilder:skip - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// FederatedDeployment is the Schema for the federateddeployments API -type FederatedDeployment struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec FederatedDeploymentSpec `json:"spec,omitempty"` - Status GenericFederatedStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// FederatedDeploymentList contains a list of FederatedDeployment -type FederatedDeploymentList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []FederatedDeployment `json:"items"` -} - -// FederatedDeploymentSpec defines the desired state of FederatedDeployment -type FederatedDeploymentSpec struct { - GenericSpecWithPlacements `json:",inline"` - GenericSpecWithOverrides `json:",inline"` - - Template appsv1.Deployment `json:"template,omitempty"` - - // revisionHistoryLimit is the maximum number of revisions that will - // be maintained in the FederatedDeployment's revision history. 
The revision history - // consists of all revisions not represented by a currently applied - // FederatedDeploymentSpec version. The default value is 10. - - // +kubebuilder:default:=10 - RevisionHistoryLimit int64 `json:"revisionHistoryLimit,omitempty"` - - RetainReplicas bool `json:"retainReplicas,omitempty"` -} diff --git a/pkg/apis/types/v1alpha1/types_follower.go b/pkg/apis/types/v1alpha1/types_follower.go deleted file mode 100644 index 01fda934..00000000 --- a/pkg/apis/types/v1alpha1/types_follower.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +kubebuilder:skip - -type GenericFederatedFollower struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec GenericFollowerSpec `json:"spec,omitempty"` -} - -type GenericFollowerSpec struct { - GenericSpecWithPlacements `json:",inline"` - GenericSpecWithFollows `json:",inline"` -} - -type GenericSpecWithFollows struct { - Follows []LeaderReference `json:"follows,omitempty"` -} - -type LeaderReference struct { - Group string `json:"group,omitempty"` - Kind string `json:"kind"` - Namespace string `json:"namespace,omitempty"` - Name string `json:"name"` -} diff --git a/pkg/apis/types/v1alpha1/types_overrides.go b/pkg/apis/types/v1alpha1/types_overrides.go deleted file mode 100644 index 4731b2c2..00000000 --- a/pkg/apis/types/v1alpha1/types_overrides.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. 
-*/ - -package v1alpha1 - -import "github.com/jinzhu/copier" - -// +kubebuilder:skip - -// GenericObjectWithOverrides represents a generic FederatedObject and its overrides field -type GenericObjectWithOverrides struct { - Spec *GenericSpecWithOverrides `json:"spec,omitempty"` -} - -type GenericSpecWithOverrides struct { - Overrides []ControllerOverride `json:"overrides,omitempty"` -} - -type ControllerOverride struct { - Controller string `json:"controller"` - Clusters []ClusterOverride `json:"clusters"` -} - -type ClusterOverride struct { - ClusterName string `json:"clusterName"` - Patches []OverridePatch `json:"paths,omitempty"` -} - -// +k8s:deepcopy-gen=false - -type OverridePatch struct { - Op string `json:"op,omitempty"` - Path string `json:"path"` - Value interface{} `json:"value,omitempty"` -} - -// As controller-gen doesn't support interface member by now(2019-12-13), we hack it. -// ref: https://github.com/kubernetes-sigs/kubebuilder/issues/528 -func (in *OverridePatch) DeepCopyInto(out *OverridePatch) { - copier.Copy(out, in) -} - -type OverridePatches []OverridePatch diff --git a/pkg/apis/types/v1alpha1/types_overrides_test.go b/pkg/apis/types/v1alpha1/types_overrides_test.go deleted file mode 100644 index 9235dad3..00000000 --- a/pkg/apis/types/v1alpha1/types_overrides_test.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestClusterOverride_DeepCopyInto(t *testing.T) { - in := &OverridePatch{ - Op: "foo", - Path: "bar", - Value: []interface{}{1, "foo", "haha"}, - } - - out := new(OverridePatch) - in.DeepCopyInto(out) - - if !assert.Equal(t, in, out) { - t.Fail() - } -} diff --git a/pkg/apis/types/v1alpha1/types_placements.go b/pkg/apis/types/v1alpha1/types_placements.go deleted file mode 100644 index 828ed4be..00000000 --- a/pkg/apis/types/v1alpha1/types_placements.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. 
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +kubebuilder:skip - -// GenericObjectWithPlacements represents a generic FederatedObject and its placement field -type GenericObjectWithPlacements struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec GenericSpecWithPlacements `json:"spec,omitempty"` -} - -type GenericSpecWithPlacements struct { - Placements []PlacementWithController `json:"placements,omitempty"` -} - -type PlacementWithController struct { - Controller string `json:"controller"` - Placement Placement `json:"placement"` -} - -type Placement struct { - Clusters []GenericClusterReference `json:"clusters,omitempty"` -} - -type GenericClusterReference struct { - Name string `json:"name"` -} diff --git a/pkg/apis/types/v1alpha1/types_status.go b/pkg/apis/types/v1alpha1/types_status.go deleted file mode 100644 index 7ab123f4..00000000 --- a/pkg/apis/types/v1alpha1/types_status.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. -*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +kubebuilder:skip - -// GenericObjectWithStatus represents a generic FederatedObject and its status field -type GenericObjectWithStatus struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Status *GenericFederatedStatus `json:"status,omitempty"` -} - -type GenericFederatedStatus struct { - CollisionCount *int32 `json:"collisionCount,omitempty"` - SyncedGeneration int64 `json:"syncedGeneration,omitempty"` - Conditions []*GenericCondition `json:"conditions,omitempty"` - Clusters []GenericClusterStatus `json:"clusters,omitempty"` -} - -type GenericCondition struct { - // Type of cluster condition - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // Last time reconciliation resulted in an error or the last time a - // change was propagated to member clusters. - // +optional - LastUpdateTime string `json:"lastUpdateTime,omitempty"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime string `json:"lastTransitionTime,omitempty"` - // (brief) reason for the condition's last transition. 
- // +optional - Reason AggregateReason `json:"reason,omitempty"` -} - -type GenericClusterStatus struct { - Name string `json:"name"` - Status PropagationStatus `json:"status,omitempty"` - Generation int64 `json:"generation,omitempty"` -} - -type PropagationStatus string - -const ( - ClusterPropagationOK PropagationStatus = "OK" - WaitingForRemoval PropagationStatus = "WaitingForRemoval" - - // Cluster-specific errors - - ClusterNotReady PropagationStatus = "ClusterNotReady" - ClusterTerminating PropagationStatus = "ClusterTerminating" - CachedRetrievalFailed PropagationStatus = "CachedRetrievalFailed" - ComputeResourceFailed PropagationStatus = "ComputeResourceFailed" - ApplyOverridesFailed PropagationStatus = "ApplyOverridesFailed" - CreationFailed PropagationStatus = "CreationFailed" - UpdateFailed PropagationStatus = "UpdateFailed" - DeletionFailed PropagationStatus = "DeletionFailed" - LabelRemovalFailed PropagationStatus = "LabelRemovalFailed" - RetrievalFailed PropagationStatus = "RetrievalFailed" - AlreadyExists PropagationStatus = "AlreadyExists" - FieldRetentionFailed PropagationStatus = "FieldRetentionFailed" - SetLastReplicasetNameFailed PropagationStatus = "SetLastReplicasetNameFailed" - VersionRetrievalFailed PropagationStatus = "VersionRetrievalFailed" - ClientRetrievalFailed PropagationStatus = "ClientRetrievalFailed" - ManagedLabelFalse PropagationStatus = "ManagedLabelFalse" - FinalizerCheckFailed PropagationStatus = "FinalizerCheckFailed" - - // Operation timeout errors - - CreationTimedOut PropagationStatus = "CreationTimedOut" - UpdateTimedOut PropagationStatus = "UpdateTimedOut" - DeletionTimedOut PropagationStatus = "DeletionTimedOut" - LabelRemovalTimedOut PropagationStatus = "LabelRemovalTimedOut" -) - -type AggregateReason string - -const ( - AggregateSuccess AggregateReason = "" - SyncRevisionsFailed AggregateReason = "SyncRevisionsFailed" - ClusterRetrievalFailed AggregateReason = "ClusterRetrievalFailed" - ComputePlacementFailed AggregateReason = "ComputePlacementFailed" - PlanRolloutFailed AggregateReason = "PlanRolloutFailed" - CheckClusters AggregateReason = "CheckClusters" - NamespaceNotFederated AggregateReason = "NamespaceNotFederated" - EnsureDeletionFailed AggregateReason = "EnsureDeletionFailed" -) - -type ConditionType string - -const ( - PropagationConditionType ConditionType = "Propagation" -) diff --git a/pkg/apis/types/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/types/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 4ab1d070..00000000 --- a/pkg/apis/types/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,464 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterOverride) DeepCopyInto(out *ClusterOverride) { - *out = *in - if in.Patches != nil { - in, out := &in.Patches, &out.Patches - *out = make([]OverridePatch, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOverride. 
-func (in *ClusterOverride) DeepCopy() *ClusterOverride { - if in == nil { - return nil - } - out := new(ClusterOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ControllerOverride) DeepCopyInto(out *ControllerOverride) { - *out = *in - if in.Clusters != nil { - in, out := &in.Clusters, &out.Clusters - *out = make([]ClusterOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerOverride. -func (in *ControllerOverride) DeepCopy() *ControllerOverride { - if in == nil { - return nil - } - out := new(ControllerOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FederatedDeployment) DeepCopyInto(out *FederatedDeployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedDeployment. -func (in *FederatedDeployment) DeepCopy() *FederatedDeployment { - if in == nil { - return nil - } - out := new(FederatedDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FederatedDeployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FederatedDeploymentList) DeepCopyInto(out *FederatedDeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FederatedDeployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedDeploymentList. -func (in *FederatedDeploymentList) DeepCopy() *FederatedDeploymentList { - if in == nil { - return nil - } - out := new(FederatedDeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FederatedDeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FederatedDeploymentSpec) DeepCopyInto(out *FederatedDeploymentSpec) { - *out = *in - in.GenericSpecWithPlacements.DeepCopyInto(&out.GenericSpecWithPlacements) - in.GenericSpecWithOverrides.DeepCopyInto(&out.GenericSpecWithOverrides) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedDeploymentSpec. 
-func (in *FederatedDeploymentSpec) DeepCopy() *FederatedDeploymentSpec { - if in == nil { - return nil - } - out := new(FederatedDeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericClusterReference) DeepCopyInto(out *GenericClusterReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericClusterReference. -func (in *GenericClusterReference) DeepCopy() *GenericClusterReference { - if in == nil { - return nil - } - out := new(GenericClusterReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericClusterStatus) DeepCopyInto(out *GenericClusterStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericClusterStatus. -func (in *GenericClusterStatus) DeepCopy() *GenericClusterStatus { - if in == nil { - return nil - } - out := new(GenericClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericCondition) DeepCopyInto(out *GenericCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericCondition. -func (in *GenericCondition) DeepCopy() *GenericCondition { - if in == nil { - return nil - } - out := new(GenericCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericFederatedFollower) DeepCopyInto(out *GenericFederatedFollower) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFederatedFollower. -func (in *GenericFederatedFollower) DeepCopy() *GenericFederatedFollower { - if in == nil { - return nil - } - out := new(GenericFederatedFollower) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericFederatedStatus) DeepCopyInto(out *GenericFederatedStatus) { - *out = *in - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]*GenericCondition, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GenericCondition) - **out = **in - } - } - } - if in.Clusters != nil { - in, out := &in.Clusters, &out.Clusters - *out = make([]GenericClusterStatus, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFederatedStatus. -func (in *GenericFederatedStatus) DeepCopy() *GenericFederatedStatus { - if in == nil { - return nil - } - out := new(GenericFederatedStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GenericFollowerSpec) DeepCopyInto(out *GenericFollowerSpec) { - *out = *in - in.GenericSpecWithPlacements.DeepCopyInto(&out.GenericSpecWithPlacements) - in.GenericSpecWithFollows.DeepCopyInto(&out.GenericSpecWithFollows) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFollowerSpec. -func (in *GenericFollowerSpec) DeepCopy() *GenericFollowerSpec { - if in == nil { - return nil - } - out := new(GenericFollowerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericObjectWithOverrides) DeepCopyInto(out *GenericObjectWithOverrides) { - *out = *in - if in.Spec != nil { - in, out := &in.Spec, &out.Spec - *out = new(GenericSpecWithOverrides) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericObjectWithOverrides. -func (in *GenericObjectWithOverrides) DeepCopy() *GenericObjectWithOverrides { - if in == nil { - return nil - } - out := new(GenericObjectWithOverrides) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericObjectWithPlacements) DeepCopyInto(out *GenericObjectWithPlacements) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericObjectWithPlacements. -func (in *GenericObjectWithPlacements) DeepCopy() *GenericObjectWithPlacements { - if in == nil { - return nil - } - out := new(GenericObjectWithPlacements) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericObjectWithStatus) DeepCopyInto(out *GenericObjectWithStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(GenericFederatedStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericObjectWithStatus. -func (in *GenericObjectWithStatus) DeepCopy() *GenericObjectWithStatus { - if in == nil { - return nil - } - out := new(GenericObjectWithStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericSpecWithFollows) DeepCopyInto(out *GenericSpecWithFollows) { - *out = *in - if in.Follows != nil { - in, out := &in.Follows, &out.Follows - *out = make([]LeaderReference, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericSpecWithFollows. -func (in *GenericSpecWithFollows) DeepCopy() *GenericSpecWithFollows { - if in == nil { - return nil - } - out := new(GenericSpecWithFollows) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GenericSpecWithOverrides) DeepCopyInto(out *GenericSpecWithOverrides) { - *out = *in - if in.Overrides != nil { - in, out := &in.Overrides, &out.Overrides - *out = make([]ControllerOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericSpecWithOverrides. -func (in *GenericSpecWithOverrides) DeepCopy() *GenericSpecWithOverrides { - if in == nil { - return nil - } - out := new(GenericSpecWithOverrides) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericSpecWithPlacements) DeepCopyInto(out *GenericSpecWithPlacements) { - *out = *in - if in.Placements != nil { - in, out := &in.Placements, &out.Placements - *out = make([]PlacementWithController, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericSpecWithPlacements. -func (in *GenericSpecWithPlacements) DeepCopy() *GenericSpecWithPlacements { - if in == nil { - return nil - } - out := new(GenericSpecWithPlacements) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaderReference) DeepCopyInto(out *LeaderReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderReference. -func (in *LeaderReference) DeepCopy() *LeaderReference { - if in == nil { - return nil - } - out := new(LeaderReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in OverridePatches) DeepCopyInto(out *OverridePatches) { - { - in := &in - *out = make(OverridePatches, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridePatches. -func (in OverridePatches) DeepCopy() OverridePatches { - if in == nil { - return nil - } - out := new(OverridePatches) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Placement) DeepCopyInto(out *Placement) { - *out = *in - if in.Clusters != nil { - in, out := &in.Clusters, &out.Clusters - *out = make([]GenericClusterReference, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. -func (in *Placement) DeepCopy() *Placement { - if in == nil { - return nil - } - out := new(Placement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PlacementWithController) DeepCopyInto(out *PlacementWithController) { - *out = *in - in.Placement.DeepCopyInto(&out.Placement) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementWithController. 
-func (in *PlacementWithController) DeepCopy() *PlacementWithController { - if in == nil { - return nil - } - out := new(PlacementWithController) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index fcbfb1d8..a412bed1 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -6,7 +6,6 @@ import ( "fmt" corev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" - typesv1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/types/v1alpha1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -15,15 +14,13 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface - TypesV1alpha1() typesv1alpha1.TypesV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient - coreV1alpha1 *corev1alpha1.CoreV1alpha1Client - typesV1alpha1 *typesv1alpha1.TypesV1alpha1Client + coreV1alpha1 *corev1alpha1.CoreV1alpha1Client } // CoreV1alpha1 retrieves the CoreV1alpha1Client @@ -31,11 +28,6 @@ func (c *Clientset) CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface { return c.coreV1alpha1 } -// TypesV1alpha1 retrieves the TypesV1alpha1Client -func (c *Clientset) TypesV1alpha1() typesv1alpha1.TypesV1alpha1Interface { - return c.typesV1alpha1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -61,10 +53,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.typesV1alpha1, err = typesv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -78,7 +66,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.coreV1alpha1 = corev1alpha1.NewForConfigOrDie(c) - cs.typesV1alpha1 = typesv1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -88,7 +75,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.coreV1alpha1 = corev1alpha1.New(c) - cs.typesV1alpha1 = typesv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 2bc5d5c6..acfa6e6d 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -6,8 +6,6 @@ import ( clientset "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" corev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" fakecorev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1/fake" - typesv1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/types/v1alpha1" - faketypesv1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/types/v1alpha1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" 
"k8s.io/client-go/discovery" @@ -66,8 +64,3 @@ var _ clientset.Interface = &Clientset{} func (c *Clientset) CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface { return &fakecorev1alpha1.FakeCoreV1alpha1{Fake: &c.Fake} } - -// TypesV1alpha1 retrieves the TypesV1alpha1Client -func (c *Clientset) TypesV1alpha1() typesv1alpha1.TypesV1alpha1Interface { - return &faketypesv1alpha1.FakeTypesV1alpha1{Fake: &c.Fake} -} diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index 6b917261..61d39e93 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -4,7 +4,6 @@ package fake import ( corev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - typesv1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -17,7 +16,6 @@ var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ corev1alpha1.AddToScheme, - typesv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index debbc83b..270ba800 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -4,7 +4,6 @@ package scheme import ( corev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - typesv1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -17,7 +16,6 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ corev1alpha1.AddToScheme, - typesv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/clustercollectedstatus.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/clustercollectedstatus.go new file mode 100644 index 00000000..debfbbcf --- /dev/null +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/clustercollectedstatus.go @@ -0,0 +1,152 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + scheme "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterCollectedStatusesGetter has a method to return a ClusterCollectedStatusInterface. +// A group's client should implement this interface. +type ClusterCollectedStatusesGetter interface { + ClusterCollectedStatuses() ClusterCollectedStatusInterface +} + +// ClusterCollectedStatusInterface has methods to work with ClusterCollectedStatus resources. 
+type ClusterCollectedStatusInterface interface { + Create(ctx context.Context, clusterCollectedStatus *v1alpha1.ClusterCollectedStatus, opts v1.CreateOptions) (*v1alpha1.ClusterCollectedStatus, error) + Update(ctx context.Context, clusterCollectedStatus *v1alpha1.ClusterCollectedStatus, opts v1.UpdateOptions) (*v1alpha1.ClusterCollectedStatus, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCollectedStatus, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCollectedStatusList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCollectedStatus, err error) + ClusterCollectedStatusExpansion +} + +// clusterCollectedStatuses implements ClusterCollectedStatusInterface +type clusterCollectedStatuses struct { + client rest.Interface +} + +// newClusterCollectedStatuses returns a ClusterCollectedStatuses +func newClusterCollectedStatuses(c *CoreV1alpha1Client) *clusterCollectedStatuses { + return &clusterCollectedStatuses{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterCollectedStatus, and returns the corresponding clusterCollectedStatus object, and an error if there is any. +func (c *clusterCollectedStatuses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCollectedStatus, err error) { + result = &v1alpha1.ClusterCollectedStatus{} + err = c.client.Get(). + Resource("clustercollectedstatuses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterCollectedStatuses that match those selectors. +func (c *clusterCollectedStatuses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCollectedStatusList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterCollectedStatusList{} + err = c.client.Get(). + Resource("clustercollectedstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterCollectedStatuses. +func (c *clusterCollectedStatuses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustercollectedstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterCollectedStatus and creates it. Returns the server's representation of the clusterCollectedStatus, and an error, if there is any. +func (c *clusterCollectedStatuses) Create(ctx context.Context, clusterCollectedStatus *v1alpha1.ClusterCollectedStatus, opts v1.CreateOptions) (result *v1alpha1.ClusterCollectedStatus, err error) { + result = &v1alpha1.ClusterCollectedStatus{} + err = c.client.Post(). + Resource("clustercollectedstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(clusterCollectedStatus). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterCollectedStatus and updates it. Returns the server's representation of the clusterCollectedStatus, and an error, if there is any. +func (c *clusterCollectedStatuses) Update(ctx context.Context, clusterCollectedStatus *v1alpha1.ClusterCollectedStatus, opts v1.UpdateOptions) (result *v1alpha1.ClusterCollectedStatus, err error) { + result = &v1alpha1.ClusterCollectedStatus{} + err = c.client.Put(). + Resource("clustercollectedstatuses"). + Name(clusterCollectedStatus.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCollectedStatus). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterCollectedStatus and deletes it. Returns an error if one occurs. +func (c *clusterCollectedStatuses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustercollectedstatuses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterCollectedStatuses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustercollectedstatuses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterCollectedStatus. +func (c *clusterCollectedStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCollectedStatus, err error) { + result = &v1alpha1.ClusterCollectedStatus{} + err = c.client.Patch(pt). + Resource("clustercollectedstatuses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/clusterfederatedobject.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/clusterfederatedobject.go new file mode 100644 index 00000000..f6d60848 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/clusterfederatedobject.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + scheme "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterFederatedObjectsGetter has a method to return a ClusterFederatedObjectInterface. +// A group's client should implement this interface. +type ClusterFederatedObjectsGetter interface { + ClusterFederatedObjects() ClusterFederatedObjectInterface +} + +// ClusterFederatedObjectInterface has methods to work with ClusterFederatedObject resources. 
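Unlike the ClusterCollectedStatus client, the interface defined just below also carries UpdateStatus, because ClusterFederatedObject has a status subresource. A minimal sketch of a read-modify-write against that subresource; updateStatus and its mutate callback are hypothetical helpers, not part of the patch.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	versioned "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
)

// updateStatus is a hypothetical helper: it fetches a ClusterFederatedObject,
// lets the caller mutate it, then writes back through the status subresource,
// so only the status portion of the object is persisted.
func updateStatus(ctx context.Context, client versioned.Interface, name string,
	mutate func(*v1alpha1.ClusterFederatedObject)) error {
	obj, err := client.CoreV1alpha1().ClusterFederatedObjects().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	mutate(obj)
	_, err = client.CoreV1alpha1().ClusterFederatedObjects().UpdateStatus(ctx, obj, metav1.UpdateOptions{})
	return err
}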
+type ClusterFederatedObjectInterface interface { + Create(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.CreateOptions) (*v1alpha1.ClusterFederatedObject, error) + Update(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.UpdateOptions) (*v1alpha1.ClusterFederatedObject, error) + UpdateStatus(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.UpdateOptions) (*v1alpha1.ClusterFederatedObject, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterFederatedObject, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterFederatedObjectList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterFederatedObject, err error) + ClusterFederatedObjectExpansion +} + +// clusterFederatedObjects implements ClusterFederatedObjectInterface +type clusterFederatedObjects struct { + client rest.Interface +} + +// newClusterFederatedObjects returns a ClusterFederatedObjects +func newClusterFederatedObjects(c *CoreV1alpha1Client) *clusterFederatedObjects { + return &clusterFederatedObjects{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterFederatedObject, and returns the corresponding clusterFederatedObject object, and an error if there is any. +func (c *clusterFederatedObjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterFederatedObject, err error) { + result = &v1alpha1.ClusterFederatedObject{} + err = c.client.Get(). + Resource("clusterfederatedobjects"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterFederatedObjects that match those selectors. +func (c *clusterFederatedObjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterFederatedObjectList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterFederatedObjectList{} + err = c.client.Get(). + Resource("clusterfederatedobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterFederatedObjects. +func (c *clusterFederatedObjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterfederatedobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterFederatedObject and creates it. Returns the server's representation of the clusterFederatedObject, and an error, if there is any. 
+func (c *clusterFederatedObjects) Create(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.CreateOptions) (result *v1alpha1.ClusterFederatedObject, err error) { + result = &v1alpha1.ClusterFederatedObject{} + err = c.client.Post(). + Resource("clusterfederatedobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterFederatedObject). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterFederatedObject and updates it. Returns the server's representation of the clusterFederatedObject, and an error, if there is any. +func (c *clusterFederatedObjects) Update(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.UpdateOptions) (result *v1alpha1.ClusterFederatedObject, err error) { + result = &v1alpha1.ClusterFederatedObject{} + err = c.client.Put(). + Resource("clusterfederatedobjects"). + Name(clusterFederatedObject.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterFederatedObject). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *clusterFederatedObjects) UpdateStatus(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.UpdateOptions) (result *v1alpha1.ClusterFederatedObject, err error) { + result = &v1alpha1.ClusterFederatedObject{} + err = c.client.Put(). + Resource("clusterfederatedobjects"). + Name(clusterFederatedObject.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterFederatedObject). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterFederatedObject and deletes it. Returns an error if one occurs. +func (c *clusterFederatedObjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterfederatedobjects"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterFederatedObjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterfederatedobjects"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterFederatedObject. +func (c *clusterFederatedObjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterFederatedObject, err error) { + result = &v1alpha1.ClusterFederatedObject{} + err = c.client.Patch(pt). + Resource("clusterfederatedobjects"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/collectedstatus.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/collectedstatus.go new file mode 100644 index 00000000..8dc06495 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/collectedstatus.go @@ -0,0 +1,162 @@ +// Code generated by client-gen. DO NOT EDIT. 
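The file that begins here, collectedstatus.go, defines the namespaced counterpart of ClusterCollectedStatus: every request below threads the namespace through Namespace(c.ns). A short sketch of the only call-site difference, using the getters this patch adds; the "default" namespace is an arbitrary example.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	versioned "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
)

// listBoth lists the namespaced and the cluster-scoped status resources;
// CollectedStatuses takes a namespace, ClusterCollectedStatuses does not.
func listBoth(ctx context.Context, client versioned.Interface) error {
	namespaced, err := client.CoreV1alpha1().CollectedStatuses("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	clusterScoped, err := client.CoreV1alpha1().ClusterCollectedStatuses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%d namespaced, %d cluster-scoped\n", len(namespaced.Items), len(clusterScoped.Items))
	return nil
}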
+ +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + scheme "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CollectedStatusesGetter has a method to return a CollectedStatusInterface. +// A group's client should implement this interface. +type CollectedStatusesGetter interface { + CollectedStatuses(namespace string) CollectedStatusInterface +} + +// CollectedStatusInterface has methods to work with CollectedStatus resources. +type CollectedStatusInterface interface { + Create(ctx context.Context, collectedStatus *v1alpha1.CollectedStatus, opts v1.CreateOptions) (*v1alpha1.CollectedStatus, error) + Update(ctx context.Context, collectedStatus *v1alpha1.CollectedStatus, opts v1.UpdateOptions) (*v1alpha1.CollectedStatus, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CollectedStatus, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CollectedStatusList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CollectedStatus, err error) + CollectedStatusExpansion +} + +// collectedStatuses implements CollectedStatusInterface +type collectedStatuses struct { + client rest.Interface + ns string +} + +// newCollectedStatuses returns a CollectedStatuses +func newCollectedStatuses(c *CoreV1alpha1Client, namespace string) *collectedStatuses { + return &collectedStatuses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the collectedStatus, and returns the corresponding collectedStatus object, and an error if there is any. +func (c *collectedStatuses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CollectedStatus, err error) { + result = &v1alpha1.CollectedStatus{} + err = c.client.Get(). + Namespace(c.ns). + Resource("collectedstatuses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CollectedStatuses that match those selectors. +func (c *collectedStatuses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CollectedStatusList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.CollectedStatusList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("collectedstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested collectedStatuses. +func (c *collectedStatuses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("collectedstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a collectedStatus and creates it. Returns the server's representation of the collectedStatus, and an error, if there is any. +func (c *collectedStatuses) Create(ctx context.Context, collectedStatus *v1alpha1.CollectedStatus, opts v1.CreateOptions) (result *v1alpha1.CollectedStatus, err error) { + result = &v1alpha1.CollectedStatus{} + err = c.client.Post(). + Namespace(c.ns). + Resource("collectedstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(collectedStatus). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a collectedStatus and updates it. Returns the server's representation of the collectedStatus, and an error, if there is any. +func (c *collectedStatuses) Update(ctx context.Context, collectedStatus *v1alpha1.CollectedStatus, opts v1.UpdateOptions) (result *v1alpha1.CollectedStatus, err error) { + result = &v1alpha1.CollectedStatus{} + err = c.client.Put(). + Namespace(c.ns). + Resource("collectedstatuses"). + Name(collectedStatus.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(collectedStatus). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the collectedStatus and deletes it. Returns an error if one occurs. +func (c *collectedStatuses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("collectedstatuses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *collectedStatuses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("collectedstatuses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched collectedStatus. +func (c *collectedStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CollectedStatus, err error) { + result = &v1alpha1.CollectedStatus{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("collectedstatuses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/core_client.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/core_client.go index d3554ac8..798e756f 100644 --- a/pkg/client/clientset/versioned/typed/core/v1alpha1/core_client.go +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/core_client.go @@ -10,10 +10,14 @@ import ( type CoreV1alpha1Interface interface { RESTClient() rest.Interface + ClusterCollectedStatusesGetter + ClusterFederatedObjectsGetter ClusterOverridePoliciesGetter ClusterPropagatedVersionsGetter ClusterPropagationPoliciesGetter + CollectedStatusesGetter FederatedClustersGetter + FederatedObjectsGetter FederatedTypeConfigsGetter OverridePoliciesGetter PropagatedVersionsGetter @@ -27,6 +31,14 @@ type CoreV1alpha1Client struct { restClient rest.Interface } +func (c *CoreV1alpha1Client) ClusterCollectedStatuses() ClusterCollectedStatusInterface { + return newClusterCollectedStatuses(c) +} + +func (c *CoreV1alpha1Client) ClusterFederatedObjects() ClusterFederatedObjectInterface { + return newClusterFederatedObjects(c) +} + func (c *CoreV1alpha1Client) ClusterOverridePolicies() ClusterOverridePolicyInterface { return newClusterOverridePolicies(c) } @@ -39,10 +51,18 @@ func (c *CoreV1alpha1Client) ClusterPropagationPolicies() ClusterPropagationPoli return newClusterPropagationPolicies(c) } +func (c *CoreV1alpha1Client) CollectedStatuses(namespace string) CollectedStatusInterface { + return newCollectedStatuses(c, namespace) +} + func (c *CoreV1alpha1Client) FederatedClusters() FederatedClusterInterface { return newFederatedClusters(c) } +func (c *CoreV1alpha1Client) FederatedObjects(namespace string) FederatedObjectInterface { + return newFederatedObjects(c, namespace) +} + func (c *CoreV1alpha1Client) FederatedTypeConfigs() FederatedTypeConfigInterface { return newFederatedTypeConfigs(c) } diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_clustercollectedstatus.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_clustercollectedstatus.go new file mode 100644 index 00000000..25043d39 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_clustercollectedstatus.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterCollectedStatuses implements ClusterCollectedStatusInterface +type FakeClusterCollectedStatuses struct { + Fake *FakeCoreV1alpha1 +} + +var clustercollectedstatusesResource = schema.GroupVersionResource{Group: "core.kubeadmiral.io", Version: "v1alpha1", Resource: "clustercollectedstatuses"} + +var clustercollectedstatusesKind = schema.GroupVersionKind{Group: "core.kubeadmiral.io", Version: "v1alpha1", Kind: "ClusterCollectedStatus"} + +// Get takes name of the clusterCollectedStatus, and returns the corresponding clusterCollectedStatus object, and an error if there is any. +func (c *FakeClusterCollectedStatuses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCollectedStatus, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(clustercollectedstatusesResource, name), &v1alpha1.ClusterCollectedStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCollectedStatus), err +} + +// List takes label and field selectors, and returns the list of ClusterCollectedStatuses that match those selectors. +func (c *FakeClusterCollectedStatuses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCollectedStatusList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clustercollectedstatusesResource, clustercollectedstatusesKind, opts), &v1alpha1.ClusterCollectedStatusList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterCollectedStatusList{ListMeta: obj.(*v1alpha1.ClusterCollectedStatusList).ListMeta} + for _, item := range obj.(*v1alpha1.ClusterCollectedStatusList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterCollectedStatuses. +func (c *FakeClusterCollectedStatuses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clustercollectedstatusesResource, opts)) +} + +// Create takes the representation of a clusterCollectedStatus and creates it. Returns the server's representation of the clusterCollectedStatus, and an error, if there is any. +func (c *FakeClusterCollectedStatuses) Create(ctx context.Context, clusterCollectedStatus *v1alpha1.ClusterCollectedStatus, opts v1.CreateOptions) (result *v1alpha1.ClusterCollectedStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clustercollectedstatusesResource, clusterCollectedStatus), &v1alpha1.ClusterCollectedStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCollectedStatus), err +} + +// Update takes the representation of a clusterCollectedStatus and updates it. Returns the server's representation of the clusterCollectedStatus, and an error, if there is any. +func (c *FakeClusterCollectedStatuses) Update(ctx context.Context, clusterCollectedStatus *v1alpha1.ClusterCollectedStatus, opts v1.UpdateOptions) (result *v1alpha1.ClusterCollectedStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clustercollectedstatusesResource, clusterCollectedStatus), &v1alpha1.ClusterCollectedStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCollectedStatus), err +} + +// Delete takes name of the clusterCollectedStatus and deletes it. Returns an error if one occurs. +func (c *FakeClusterCollectedStatuses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clustercollectedstatusesResource, name), &v1alpha1.ClusterCollectedStatus{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterCollectedStatuses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clustercollectedstatusesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterCollectedStatusList{}) + return err +} + +// Patch applies the patch and returns the patched clusterCollectedStatus. 
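Patch takes pre-encoded bytes plus a patch type rather than a typed object. A hedged sketch of calling it on the real client with a JSON merge patch; addLabel is a hypothetical helper and the label key/value are arbitrary examples.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	versioned "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
)

// addLabel applies a JSON merge patch to a ClusterCollectedStatus;
// no typed object needs to be constructed on the way in.
func addLabel(ctx context.Context, client versioned.Interface, name string) (*v1alpha1.ClusterCollectedStatus, error) {
	patch := []byte(`{"metadata":{"labels":{"example":"true"}}}`)
	return client.CoreV1alpha1().ClusterCollectedStatuses().Patch(
		ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
}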
+func (c *FakeClusterCollectedStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCollectedStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clustercollectedstatusesResource, name, pt, data, subresources...), &v1alpha1.ClusterCollectedStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCollectedStatus), err +} diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_clusterfederatedobject.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_clusterfederatedobject.go new file mode 100644 index 00000000..32101b81 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_clusterfederatedobject.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterFederatedObjects implements ClusterFederatedObjectInterface +type FakeClusterFederatedObjects struct { + Fake *FakeCoreV1alpha1 +} + +var clusterfederatedobjectsResource = schema.GroupVersionResource{Group: "core.kubeadmiral.io", Version: "v1alpha1", Resource: "clusterfederatedobjects"} + +var clusterfederatedobjectsKind = schema.GroupVersionKind{Group: "core.kubeadmiral.io", Version: "v1alpha1", Kind: "ClusterFederatedObject"} + +// Get takes name of the clusterFederatedObject, and returns the corresponding clusterFederatedObject object, and an error if there is any. +func (c *FakeClusterFederatedObjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterFederatedObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterfederatedobjectsResource, name), &v1alpha1.ClusterFederatedObject{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterFederatedObject), err +} + +// List takes label and field selectors, and returns the list of ClusterFederatedObjects that match those selectors. +func (c *FakeClusterFederatedObjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterFederatedObjectList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusterfederatedobjectsResource, clusterfederatedobjectsKind, opts), &v1alpha1.ClusterFederatedObjectList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterFederatedObjectList{ListMeta: obj.(*v1alpha1.ClusterFederatedObjectList).ListMeta} + for _, item := range obj.(*v1alpha1.ClusterFederatedObjectList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterFederatedObjects. +func (c *FakeClusterFederatedObjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterfederatedobjectsResource, opts)) +} + +// Create takes the representation of a clusterFederatedObject and creates it. 
Returns the server's representation of the clusterFederatedObject, and an error, if there is any. +func (c *FakeClusterFederatedObjects) Create(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.CreateOptions) (result *v1alpha1.ClusterFederatedObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterfederatedobjectsResource, clusterFederatedObject), &v1alpha1.ClusterFederatedObject{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterFederatedObject), err +} + +// Update takes the representation of a clusterFederatedObject and updates it. Returns the server's representation of the clusterFederatedObject, and an error, if there is any. +func (c *FakeClusterFederatedObjects) Update(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.UpdateOptions) (result *v1alpha1.ClusterFederatedObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterfederatedobjectsResource, clusterFederatedObject), &v1alpha1.ClusterFederatedObject{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterFederatedObject), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterFederatedObjects) UpdateStatus(ctx context.Context, clusterFederatedObject *v1alpha1.ClusterFederatedObject, opts v1.UpdateOptions) (*v1alpha1.ClusterFederatedObject, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(clusterfederatedobjectsResource, "status", clusterFederatedObject), &v1alpha1.ClusterFederatedObject{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterFederatedObject), err +} + +// Delete takes name of the clusterFederatedObject and deletes it. Returns an error if one occurs. +func (c *FakeClusterFederatedObjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterfederatedobjectsResource, name), &v1alpha1.ClusterFederatedObject{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterFederatedObjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterfederatedobjectsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterFederatedObjectList{}) + return err +} + +// Patch applies the patch and returns the patched clusterFederatedObject. +func (c *FakeClusterFederatedObjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterFederatedObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterfederatedobjectsResource, name, pt, data, subresources...), &v1alpha1.ClusterFederatedObject{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterFederatedObject), err +} diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_collectedstatus.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_collectedstatus.go new file mode 100644 index 00000000..18833904 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_collectedstatus.go @@ -0,0 +1,114 @@ +// Code generated by client-gen. DO NOT EDIT. 
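fake_collectedstatus.go, beginning here, rounds out the in-memory test doubles for the new types. A sketch of how a unit test might exercise them; it assumes the NewSimpleClientset constructor that client-gen emits in clientset_generated.go (whose body is not reproduced in full above), that CollectedStatus embeds ObjectMeta per types_collectedstatus.go, and a hypothetical test package name.

package core_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	"github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/fake"
)

func TestFakeCollectedStatuses(t *testing.T) {
	// Seed the in-memory tracker with one namespaced object.
	seed := &v1alpha1.CollectedStatus{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	client := fake.NewSimpleClientset(seed)

	got, err := client.CoreV1alpha1().CollectedStatuses("default").Get(
		context.TODO(), "example", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "example" {
		t.Fatalf("got name %q, want %q", got.Name, "example")
	}
}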
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCollectedStatuses implements CollectedStatusInterface +type FakeCollectedStatuses struct { + Fake *FakeCoreV1alpha1 + ns string +} + +var collectedstatusesResource = schema.GroupVersionResource{Group: "core.kubeadmiral.io", Version: "v1alpha1", Resource: "collectedstatuses"} + +var collectedstatusesKind = schema.GroupVersionKind{Group: "core.kubeadmiral.io", Version: "v1alpha1", Kind: "CollectedStatus"} + +// Get takes name of the collectedStatus, and returns the corresponding collectedStatus object, and an error if there is any. +func (c *FakeCollectedStatuses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CollectedStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(collectedstatusesResource, c.ns, name), &v1alpha1.CollectedStatus{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CollectedStatus), err +} + +// List takes label and field selectors, and returns the list of CollectedStatuses that match those selectors. +func (c *FakeCollectedStatuses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CollectedStatusList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(collectedstatusesResource, collectedstatusesKind, c.ns, opts), &v1alpha1.CollectedStatusList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.CollectedStatusList{ListMeta: obj.(*v1alpha1.CollectedStatusList).ListMeta} + for _, item := range obj.(*v1alpha1.CollectedStatusList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested collectedStatuses. +func (c *FakeCollectedStatuses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(collectedstatusesResource, c.ns, opts)) + +} + +// Create takes the representation of a collectedStatus and creates it. Returns the server's representation of the collectedStatus, and an error, if there is any. +func (c *FakeCollectedStatuses) Create(ctx context.Context, collectedStatus *v1alpha1.CollectedStatus, opts v1.CreateOptions) (result *v1alpha1.CollectedStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(collectedstatusesResource, c.ns, collectedStatus), &v1alpha1.CollectedStatus{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CollectedStatus), err +} + +// Update takes the representation of a collectedStatus and updates it. Returns the server's representation of the collectedStatus, and an error, if there is any. +func (c *FakeCollectedStatuses) Update(ctx context.Context, collectedStatus *v1alpha1.CollectedStatus, opts v1.UpdateOptions) (result *v1alpha1.CollectedStatus, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(collectedstatusesResource, c.ns, collectedStatus), &v1alpha1.CollectedStatus{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CollectedStatus), err +} + +// Delete takes name of the collectedStatus and deletes it. Returns an error if one occurs. +func (c *FakeCollectedStatuses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(collectedstatusesResource, c.ns, name), &v1alpha1.CollectedStatus{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCollectedStatuses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(collectedstatusesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.CollectedStatusList{}) + return err +} + +// Patch applies the patch and returns the patched collectedStatus. +func (c *FakeCollectedStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CollectedStatus, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(collectedstatusesResource, c.ns, name, pt, data, subresources...), &v1alpha1.CollectedStatus{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CollectedStatus), err +} diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_core_client.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_core_client.go index 3bc3329e..751322e8 100644 --- a/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_core_client.go +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_core_client.go @@ -12,6 +12,14 @@ type FakeCoreV1alpha1 struct { *testing.Fake } +func (c *FakeCoreV1alpha1) ClusterCollectedStatuses() v1alpha1.ClusterCollectedStatusInterface { + return &FakeClusterCollectedStatuses{c} +} + +func (c *FakeCoreV1alpha1) ClusterFederatedObjects() v1alpha1.ClusterFederatedObjectInterface { + return &FakeClusterFederatedObjects{c} +} + func (c *FakeCoreV1alpha1) ClusterOverridePolicies() v1alpha1.ClusterOverridePolicyInterface { return &FakeClusterOverridePolicies{c} } @@ -24,10 +32,18 @@ func (c *FakeCoreV1alpha1) ClusterPropagationPolicies() v1alpha1.ClusterPropagat return &FakeClusterPropagationPolicies{c} } +func (c *FakeCoreV1alpha1) CollectedStatuses(namespace string) v1alpha1.CollectedStatusInterface { + return &FakeCollectedStatuses{c, namespace} +} + func (c *FakeCoreV1alpha1) FederatedClusters() v1alpha1.FederatedClusterInterface { return &FakeFederatedClusters{c} } +func (c *FakeCoreV1alpha1) FederatedObjects(namespace string) v1alpha1.FederatedObjectInterface { + return &FakeFederatedObjects{c, namespace} +} + func (c *FakeCoreV1alpha1) FederatedTypeConfigs() v1alpha1.FederatedTypeConfigInterface { return &FakeFederatedTypeConfigs{c} } diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_federatedobject.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_federatedobject.go new file mode 100644 index 00000000..a05fcf61 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_federatedobject.go @@ -0,0 +1,126 @@ +// Code generated by client-gen. DO NOT EDIT. 
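With FakeCoreV1alpha1 now exposing the namespaced getters, the namespaced fakes can be driven the same way. A hedged test fragment follows, reusing the imports from the sketch above; the names are made up. Note that the generated fake List filters by label selector only, so field selectors are silently ignored in tests.

seed := &fedcorev1a1.CollectedStatus{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "status-1",
		Namespace: "default",
		Labels:    map[string]string{"app": "demo"},
	},
}
client := fedfake.NewSimpleClientset(seed)

list, err := client.CoreV1alpha1().
	CollectedStatuses("default").
	List(context.TODO(), metav1.ListOptions{LabelSelector: "app=demo"})
if err != nil {
	t.Fatalf("List: %v", err)
}
if len(list.Items) != 1 {
	t.Fatalf("expected 1 item, got %d", len(list.Items))
}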
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeFederatedObjects implements FederatedObjectInterface +type FakeFederatedObjects struct { + Fake *FakeCoreV1alpha1 + ns string +} + +var federatedobjectsResource = schema.GroupVersionResource{Group: "core.kubeadmiral.io", Version: "v1alpha1", Resource: "federatedobjects"} + +var federatedobjectsKind = schema.GroupVersionKind{Group: "core.kubeadmiral.io", Version: "v1alpha1", Kind: "FederatedObject"} + +// Get takes name of the federatedObject, and returns the corresponding federatedObject object, and an error if there is any. +func (c *FakeFederatedObjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FederatedObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(federatedobjectsResource, c.ns, name), &v1alpha1.FederatedObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FederatedObject), err +} + +// List takes label and field selectors, and returns the list of FederatedObjects that match those selectors. +func (c *FakeFederatedObjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FederatedObjectList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(federatedobjectsResource, federatedobjectsKind, c.ns, opts), &v1alpha1.FederatedObjectList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.FederatedObjectList{ListMeta: obj.(*v1alpha1.FederatedObjectList).ListMeta} + for _, item := range obj.(*v1alpha1.FederatedObjectList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested federatedObjects. +func (c *FakeFederatedObjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(federatedobjectsResource, c.ns, opts)) + +} + +// Create takes the representation of a federatedObject and creates it. Returns the server's representation of the federatedObject, and an error, if there is any. +func (c *FakeFederatedObjects) Create(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.CreateOptions) (result *v1alpha1.FederatedObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(federatedobjectsResource, c.ns, federatedObject), &v1alpha1.FederatedObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FederatedObject), err +} + +// Update takes the representation of a federatedObject and updates it. Returns the server's representation of the federatedObject, and an error, if there is any. +func (c *FakeFederatedObjects) Update(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.UpdateOptions) (result *v1alpha1.FederatedObject, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(federatedobjectsResource, c.ns, federatedObject), &v1alpha1.FederatedObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FederatedObject), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeFederatedObjects) UpdateStatus(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.UpdateOptions) (*v1alpha1.FederatedObject, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(federatedobjectsResource, "status", c.ns, federatedObject), &v1alpha1.FederatedObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FederatedObject), err +} + +// Delete takes name of the federatedObject and deletes it. Returns an error if one occurs. +func (c *FakeFederatedObjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(federatedobjectsResource, c.ns, name), &v1alpha1.FederatedObject{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFederatedObjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(federatedobjectsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.FederatedObjectList{}) + return err +} + +// Patch applies the patch and returns the patched federatedObject. +func (c *FakeFederatedObjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FederatedObject, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(federatedobjectsResource, c.ns, name, pt, data, subresources...), &v1alpha1.FederatedObject{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.FederatedObject), err +} diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_federatedtypeconfig.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_federatedtypeconfig.go index 69b2d6dd..03d0de02 100644 --- a/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_federatedtypeconfig.go +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/fake/fake_federatedtypeconfig.go @@ -80,17 +80,6 @@ func (c *FakeFederatedTypeConfigs) Update(ctx context.Context, federatedTypeConf return obj.(*v1alpha1.FederatedTypeConfig), err } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFederatedTypeConfigs) UpdateStatus(ctx context.Context, federatedTypeConfig *v1alpha1.FederatedTypeConfig, opts v1.UpdateOptions) (*v1alpha1.FederatedTypeConfig, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(federatedtypeconfigsResource, "status", federatedTypeConfig), &v1alpha1.FederatedTypeConfig{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.FederatedTypeConfig), err -} - // Delete takes name of the federatedTypeConfig and deletes it. Returns an error if one occurs. func (c *FakeFederatedTypeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/federatedobject.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/federatedobject.go new file mode 100644 index 00000000..0876f448 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/federatedobject.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + scheme "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// FederatedObjectsGetter has a method to return a FederatedObjectInterface. +// A group's client should implement this interface. +type FederatedObjectsGetter interface { + FederatedObjects(namespace string) FederatedObjectInterface +} + +// FederatedObjectInterface has methods to work with FederatedObject resources. +type FederatedObjectInterface interface { + Create(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.CreateOptions) (*v1alpha1.FederatedObject, error) + Update(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.UpdateOptions) (*v1alpha1.FederatedObject, error) + UpdateStatus(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.UpdateOptions) (*v1alpha1.FederatedObject, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FederatedObject, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FederatedObjectList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FederatedObject, err error) + FederatedObjectExpansion +} + +// federatedObjects implements FederatedObjectInterface +type federatedObjects struct { + client rest.Interface + ns string +} + +// newFederatedObjects returns a FederatedObjects +func newFederatedObjects(c *CoreV1alpha1Client, namespace string) *federatedObjects { + return &federatedObjects{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the federatedObject, and returns the corresponding federatedObject object, and an error if there is any. +func (c *federatedObjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FederatedObject, err error) { + result = &v1alpha1.FederatedObject{} + err = c.client.Get(). + Namespace(c.ns). + Resource("federatedobjects"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FederatedObjects that match those selectors. +func (c *federatedObjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FederatedObjectList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.FederatedObjectList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("federatedobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested federatedObjects. +func (c *federatedObjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("federatedobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a federatedObject and creates it. Returns the server's representation of the federatedObject, and an error, if there is any. +func (c *federatedObjects) Create(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.CreateOptions) (result *v1alpha1.FederatedObject, err error) { + result = &v1alpha1.FederatedObject{} + err = c.client.Post(). + Namespace(c.ns). + Resource("federatedobjects"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(federatedObject). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a federatedObject and updates it. Returns the server's representation of the federatedObject, and an error, if there is any. +func (c *federatedObjects) Update(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.UpdateOptions) (result *v1alpha1.FederatedObject, err error) { + result = &v1alpha1.FederatedObject{} + err = c.client.Put(). + Namespace(c.ns). + Resource("federatedobjects"). + Name(federatedObject.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(federatedObject). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *federatedObjects) UpdateStatus(ctx context.Context, federatedObject *v1alpha1.FederatedObject, opts v1.UpdateOptions) (result *v1alpha1.FederatedObject, err error) { + result = &v1alpha1.FederatedObject{} + err = c.client.Put(). + Namespace(c.ns). + Resource("federatedobjects"). + Name(federatedObject.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(federatedObject). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the federatedObject and deletes it. Returns an error if one occurs. +func (c *federatedObjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("federatedobjects"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *federatedObjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("federatedobjects"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched federatedObject. +func (c *federatedObjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FederatedObject, err error) { + result = &v1alpha1.FederatedObject{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("federatedobjects"). + Name(name). 
+ SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/federatedtypeconfig.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/federatedtypeconfig.go index b7961c34..8a76b301 100644 --- a/pkg/client/clientset/versioned/typed/core/v1alpha1/federatedtypeconfig.go +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/federatedtypeconfig.go @@ -24,7 +24,6 @@ type FederatedTypeConfigsGetter interface { type FederatedTypeConfigInterface interface { Create(ctx context.Context, federatedTypeConfig *v1alpha1.FederatedTypeConfig, opts v1.CreateOptions) (*v1alpha1.FederatedTypeConfig, error) Update(ctx context.Context, federatedTypeConfig *v1alpha1.FederatedTypeConfig, opts v1.UpdateOptions) (*v1alpha1.FederatedTypeConfig, error) - UpdateStatus(ctx context.Context, federatedTypeConfig *v1alpha1.FederatedTypeConfig, opts v1.UpdateOptions) (*v1alpha1.FederatedTypeConfig, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FederatedTypeConfig, error) @@ -113,21 +112,6 @@ func (c *federatedTypeConfigs) Update(ctx context.Context, federatedTypeConfig * return } -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *federatedTypeConfigs) UpdateStatus(ctx context.Context, federatedTypeConfig *v1alpha1.FederatedTypeConfig, opts v1.UpdateOptions) (result *v1alpha1.FederatedTypeConfig, err error) { - result = &v1alpha1.FederatedTypeConfig{} - err = c.client.Put(). - Resource("federatedtypeconfigs"). - Name(federatedTypeConfig.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(federatedTypeConfig). - Do(ctx). - Into(result) - return -} - // Delete takes name of the federatedTypeConfig and deletes it. Returns an error if one occurs. func (c *federatedTypeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). diff --git a/pkg/client/clientset/versioned/typed/core/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/core/v1alpha1/generated_expansion.go index ad30121e..bf602db7 100644 --- a/pkg/client/clientset/versioned/typed/core/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/core/v1alpha1/generated_expansion.go @@ -2,14 +2,22 @@ package v1alpha1 +type ClusterCollectedStatusExpansion interface{} + +type ClusterFederatedObjectExpansion interface{} + type ClusterOverridePolicyExpansion interface{} type ClusterPropagatedVersionExpansion interface{} type ClusterPropagationPolicyExpansion interface{} +type CollectedStatusExpansion interface{} + type FederatedClusterExpansion interface{} +type FederatedObjectExpansion interface{} + type FederatedTypeConfigExpansion interface{} type OverridePolicyExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/types/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/types/v1alpha1/doc.go deleted file mode 100644 index 93a7ca4e..00000000 --- a/pkg/client/clientset/versioned/typed/types/v1alpha1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
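Stepping back from the deletions for a moment: the new typed FederatedObject client above supports the full verb set, including UpdateStatus, while FederatedTypeConfig loses its status subresource client in the same change. A minimal usage sketch, assuming a valid rest.Config; the function name and patch payload are illustrative.

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/rest"

	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
)

// labelFederatedObject merge-patches a label onto a namespaced
// FederatedObject. Appending "status" as a trailing subresources argument
// would target the status subresource instead.
func labelFederatedObject(cfg *rest.Config, namespace, name string) error {
	client, err := fedclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	patch := []byte(`{"metadata":{"labels":{"example":"true"}}}`)
	_, err = client.CoreV1alpha1().
		FederatedObjects(namespace).
		Patch(context.TODO(), name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}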
-package v1alpha1 diff --git a/pkg/client/clientset/versioned/typed/types/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/types/v1alpha1/fake/doc.go deleted file mode 100644 index 2b5ba4c8..00000000 --- a/pkg/client/clientset/versioned/typed/types/v1alpha1/fake/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/pkg/client/clientset/versioned/typed/types/v1alpha1/fake/fake_types_client.go b/pkg/client/clientset/versioned/typed/types/v1alpha1/fake/fake_types_client.go deleted file mode 100644 index 04f47b5f..00000000 --- a/pkg/client/clientset/versioned/typed/types/v1alpha1/fake/fake_types_client.go +++ /dev/null @@ -1,19 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeTypesV1alpha1 struct { - *testing.Fake -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeTypesV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/pkg/client/clientset/versioned/typed/types/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/types/v1alpha1/generated_expansion.go deleted file mode 100644 index 95037b31..00000000 --- a/pkg/client/clientset/versioned/typed/types/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,3 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 diff --git a/pkg/client/clientset/versioned/typed/types/v1alpha1/types_client.go b/pkg/client/clientset/versioned/typed/types/v1alpha1/types_client.go deleted file mode 100644 index 136186ff..00000000 --- a/pkg/client/clientset/versioned/typed/types/v1alpha1/types_client.go +++ /dev/null @@ -1,68 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type TypesV1alpha1Interface interface { - RESTClient() rest.Interface -} - -// TypesV1alpha1Client is used to interact with features provided by the types.kubeadmiral.io group. -type TypesV1alpha1Client struct { - restClient rest.Interface -} - -// NewForConfig creates a new TypesV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*TypesV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &TypesV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new TypesV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *TypesV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new TypesV1alpha1Client for the given RESTClient. 
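The files deleted here retire the placeholder types.kubeadmiral.io client group, which exposed no resources of its own. Callers migrate to the core group instead; a hedged sketch of the direction, with fedclient as in the previous sketch:

// Before (removed): a dedicated client for the now-empty
// types.kubeadmiral.io group.
//
//	typesClient, err := typesv1alpha1.NewForConfig(cfg)
//
// After: the unified federated types live in core.kubeadmiral.io, so the
// standard versioned clientset covers them.
client, err := fedclient.NewForConfig(cfg)
if err != nil {
	return err
}
_ = client.CoreV1alpha1().FederatedObjects("default")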
-func New(c rest.Interface) *TypesV1alpha1Client { - return &TypesV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *TypesV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/pkg/client/generic/scheme/register.go b/pkg/client/generic/scheme/register.go index fc5bd2f9..3f08214a 100644 --- a/pkg/client/generic/scheme/register.go +++ b/pkg/client/generic/scheme/register.go @@ -29,7 +29,6 @@ import ( k8sscheme "k8s.io/client-go/kubernetes/scheme" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" ) var ( @@ -38,7 +37,6 @@ var ( ParameterCodec = runtime.NewParameterCodec(Scheme) localSchemeBuilder = runtime.SchemeBuilder{ fedcorev1a1.AddToScheme, - fedtypesv1a1.AddToScheme, k8sscheme.AddToScheme, } ) diff --git a/pkg/client/informers/externalversions/core/v1alpha1/clustercollectedstatus.go b/pkg/client/informers/externalversions/core/v1alpha1/clustercollectedstatus.go new file mode 100644 index 00000000..7bbe62a6 --- /dev/null +++ b/pkg/client/informers/externalversions/core/v1alpha1/clustercollectedstatus.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + corev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + versioned "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterCollectedStatusInformer provides access to a shared informer and lister for +// ClusterCollectedStatuses. +type ClusterCollectedStatusInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterCollectedStatusLister +} + +type clusterCollectedStatusInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterCollectedStatusInformer constructs a new informer for ClusterCollectedStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterCollectedStatusInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterCollectedStatusInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterCollectedStatusInformer constructs a new informer for ClusterCollectedStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterCollectedStatusInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1alpha1().ClusterCollectedStatuses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1alpha1().ClusterCollectedStatuses().Watch(context.TODO(), options) + }, + }, + &corev1alpha1.ClusterCollectedStatus{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterCollectedStatusInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterCollectedStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterCollectedStatusInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1alpha1.ClusterCollectedStatus{}, f.defaultInformer) +} + +func (f *clusterCollectedStatusInformer) Lister() v1alpha1.ClusterCollectedStatusLister { + return v1alpha1.NewClusterCollectedStatusLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/core/v1alpha1/clusterfederatedobject.go b/pkg/client/informers/externalversions/core/v1alpha1/clusterfederatedobject.go new file mode 100644 index 00000000..924968ff --- /dev/null +++ b/pkg/client/informers/externalversions/core/v1alpha1/clusterfederatedobject.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + corev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + versioned "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterFederatedObjectInformer provides access to a shared informer and lister for +// ClusterFederatedObjects. +type ClusterFederatedObjectInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterFederatedObjectLister +} + +type clusterFederatedObjectInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterFederatedObjectInformer constructs a new informer for ClusterFederatedObject type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterFederatedObjectInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterFederatedObjectInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterFederatedObjectInformer constructs a new informer for ClusterFederatedObject type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterFederatedObjectInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1alpha1().ClusterFederatedObjects().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1alpha1().ClusterFederatedObjects().Watch(context.TODO(), options) + }, + }, + &corev1alpha1.ClusterFederatedObject{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterFederatedObjectInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterFederatedObjectInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterFederatedObjectInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1alpha1.ClusterFederatedObject{}, f.defaultInformer) +} + +func (f *clusterFederatedObjectInformer) Lister() v1alpha1.ClusterFederatedObjectLister { + return v1alpha1.NewClusterFederatedObjectLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/core/v1alpha1/collectedstatus.go b/pkg/client/informers/externalversions/core/v1alpha1/collectedstatus.go new file mode 100644 index 00000000..19f310af --- /dev/null +++ b/pkg/client/informers/externalversions/core/v1alpha1/collectedstatus.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + corev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + versioned "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CollectedStatusInformer provides access to a shared informer and lister for +// CollectedStatuses. +type CollectedStatusInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.CollectedStatusLister +} + +type collectedStatusInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCollectedStatusInformer constructs a new informer for CollectedStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewCollectedStatusInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCollectedStatusInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCollectedStatusInformer constructs a new informer for CollectedStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCollectedStatusInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1alpha1().CollectedStatuses(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1alpha1().CollectedStatuses(namespace).Watch(context.TODO(), options) + }, + }, + &corev1alpha1.CollectedStatus{}, + resyncPeriod, + indexers, + ) +} + +func (f *collectedStatusInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCollectedStatusInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *collectedStatusInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1alpha1.CollectedStatus{}, f.defaultInformer) +} + +func (f *collectedStatusInformer) Lister() v1alpha1.CollectedStatusLister { + return v1alpha1.NewCollectedStatusLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/core/v1alpha1/federatedobject.go b/pkg/client/informers/externalversions/core/v1alpha1/federatedobject.go new file mode 100644 index 00000000..89930604 --- /dev/null +++ b/pkg/client/informers/externalversions/core/v1alpha1/federatedobject.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + corev1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + versioned "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// FederatedObjectInformer provides access to a shared informer and lister for +// FederatedObjects. +type FederatedObjectInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.FederatedObjectLister +} + +type federatedObjectInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewFederatedObjectInformer constructs a new informer for FederatedObject type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFederatedObjectInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFederatedObjectInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredFederatedObjectInformer constructs a new informer for FederatedObject type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredFederatedObjectInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1alpha1().FederatedObjects(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1alpha1().FederatedObjects(namespace).Watch(context.TODO(), options) + }, + }, + &corev1alpha1.FederatedObject{}, + resyncPeriod, + indexers, + ) +} + +func (f *federatedObjectInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFederatedObjectInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *federatedObjectInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&corev1alpha1.FederatedObject{}, f.defaultInformer) +} + +func (f *federatedObjectInformer) Lister() v1alpha1.FederatedObjectLister { + return v1alpha1.NewFederatedObjectLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/core/v1alpha1/interface.go b/pkg/client/informers/externalversions/core/v1alpha1/interface.go index 29d7e46c..dda83f1d 100644 --- a/pkg/client/informers/externalversions/core/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/core/v1alpha1/interface.go @@ -8,14 +8,22 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // ClusterCollectedStatuses returns a ClusterCollectedStatusInformer. + ClusterCollectedStatuses() ClusterCollectedStatusInformer + // ClusterFederatedObjects returns a ClusterFederatedObjectInformer. + ClusterFederatedObjects() ClusterFederatedObjectInformer // ClusterOverridePolicies returns a ClusterOverridePolicyInformer. ClusterOverridePolicies() ClusterOverridePolicyInformer // ClusterPropagatedVersions returns a ClusterPropagatedVersionInformer. ClusterPropagatedVersions() ClusterPropagatedVersionInformer // ClusterPropagationPolicies returns a ClusterPropagationPolicyInformer. ClusterPropagationPolicies() ClusterPropagationPolicyInformer + // CollectedStatuses returns a CollectedStatusInformer. + CollectedStatuses() CollectedStatusInformer // FederatedClusters returns a FederatedClusterInformer. FederatedClusters() FederatedClusterInformer + // FederatedObjects returns a FederatedObjectInformer. + FederatedObjects() FederatedObjectInformer // FederatedTypeConfigs returns a FederatedTypeConfigInformer. 
FederatedTypeConfigs() FederatedTypeConfigInformer // OverridePolicies returns a OverridePolicyInformer. @@ -41,6 +49,16 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// ClusterCollectedStatuses returns a ClusterCollectedStatusInformer. +func (v *version) ClusterCollectedStatuses() ClusterCollectedStatusInformer { + return &clusterCollectedStatusInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterFederatedObjects returns a ClusterFederatedObjectInformer. +func (v *version) ClusterFederatedObjects() ClusterFederatedObjectInformer { + return &clusterFederatedObjectInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // ClusterOverridePolicies returns a ClusterOverridePolicyInformer. func (v *version) ClusterOverridePolicies() ClusterOverridePolicyInformer { return &clusterOverridePolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} @@ -56,11 +74,21 @@ func (v *version) ClusterPropagationPolicies() ClusterPropagationPolicyInformer return &clusterPropagationPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// CollectedStatuses returns a CollectedStatusInformer. +func (v *version) CollectedStatuses() CollectedStatusInformer { + return &collectedStatusInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // FederatedClusters returns a FederatedClusterInformer. func (v *version) FederatedClusters() FederatedClusterInformer { return &federatedClusterInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// FederatedObjects returns a FederatedObjectInformer. +func (v *version) FederatedObjects() FederatedObjectInformer { + return &federatedObjectInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // FederatedTypeConfigs returns a FederatedTypeConfigInformer. 
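Pausing before the remaining accessors: consumers normally obtain the new informers through the generated shared informer factory rather than calling the NewFiltered* constructors directly. A minimal sketch, assuming the generated factory package; the function name and resync period are arbitrary.

import (
	"time"

	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
	fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions"
)

func startFederatedObjectInformers(client fedclient.Interface, stopCh <-chan struct{}) {
	factory := fedinformers.NewSharedInformerFactory(client, 10*time.Minute)

	// Request the informers before Start so the factory registers them.
	fedObjects := factory.Core().V1alpha1().FederatedObjects()
	clusterFedObjects := factory.Core().V1alpha1().ClusterFederatedObjects()

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	_ = fedObjects.Lister()        // namespaced lister
	_ = clusterFedObjects.Lister() // cluster-scoped lister
}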
func (v *version) FederatedTypeConfigs() FederatedTypeConfigInformer { return &federatedTypeConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index adde4a0e..36478bbc 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -37,14 +37,22 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=core.kubeadmiral.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("clustercollectedstatuses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().ClusterCollectedStatuses().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("clusterfederatedobjects"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().ClusterFederatedObjects().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("clusteroverridepolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().ClusterOverridePolicies().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("clusterpropagatedversions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().ClusterPropagatedVersions().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("clusterpropagationpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().ClusterPropagationPolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("collectedstatuses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().CollectedStatuses().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("federatedclusters"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().FederatedClusters().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("federatedobjects"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().FederatedObjects().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("federatedtypeconfigs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().FederatedTypeConfigs().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("overridepolicies"): diff --git a/pkg/client/listers/core/v1alpha1/clustercollectedstatus.go b/pkg/client/listers/core/v1alpha1/clustercollectedstatus.go new file mode 100644 index 00000000..049128eb --- /dev/null +++ b/pkg/client/listers/core/v1alpha1/clustercollectedstatus.go @@ -0,0 +1,52 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterCollectedStatusLister helps list ClusterCollectedStatuses. +// All objects returned here must be treated as read-only. +type ClusterCollectedStatusLister interface { + // List lists all ClusterCollectedStatuses in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1alpha1.ClusterCollectedStatus, err error) + // Get retrieves the ClusterCollectedStatus from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ClusterCollectedStatus, error) + ClusterCollectedStatusListerExpansion +} + +// clusterCollectedStatusLister implements the ClusterCollectedStatusLister interface. +type clusterCollectedStatusLister struct { + indexer cache.Indexer +} + +// NewClusterCollectedStatusLister returns a new ClusterCollectedStatusLister. +func NewClusterCollectedStatusLister(indexer cache.Indexer) ClusterCollectedStatusLister { + return &clusterCollectedStatusLister{indexer: indexer} +} + +// List lists all ClusterCollectedStatuses in the indexer. +func (s *clusterCollectedStatusLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterCollectedStatus, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterCollectedStatus)) + }) + return ret, err +} + +// Get retrieves the ClusterCollectedStatus from the index for a given name. +func (s *clusterCollectedStatusLister) Get(name string) (*v1alpha1.ClusterCollectedStatus, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clustercollectedstatus"), name) + } + return obj.(*v1alpha1.ClusterCollectedStatus), nil +} diff --git a/pkg/client/listers/core/v1alpha1/clusterfederatedobject.go b/pkg/client/listers/core/v1alpha1/clusterfederatedobject.go new file mode 100644 index 00000000..aef44204 --- /dev/null +++ b/pkg/client/listers/core/v1alpha1/clusterfederatedobject.go @@ -0,0 +1,52 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterFederatedObjectLister helps list ClusterFederatedObjects. +// All objects returned here must be treated as read-only. +type ClusterFederatedObjectLister interface { + // List lists all ClusterFederatedObjects in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ClusterFederatedObject, err error) + // Get retrieves the ClusterFederatedObject from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ClusterFederatedObject, error) + ClusterFederatedObjectListerExpansion +} + +// clusterFederatedObjectLister implements the ClusterFederatedObjectLister interface. +type clusterFederatedObjectLister struct { + indexer cache.Indexer +} + +// NewClusterFederatedObjectLister returns a new ClusterFederatedObjectLister. +func NewClusterFederatedObjectLister(indexer cache.Indexer) ClusterFederatedObjectLister { + return &clusterFederatedObjectLister{indexer: indexer} +} + +// List lists all ClusterFederatedObjects in the indexer. +func (s *clusterFederatedObjectLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterFederatedObject, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterFederatedObject)) + }) + return ret, err +} + +// Get retrieves the ClusterFederatedObject from the index for a given name. 
+func (s *clusterFederatedObjectLister) Get(name string) (*v1alpha1.ClusterFederatedObject, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clusterfederatedobject"), name) + } + return obj.(*v1alpha1.ClusterFederatedObject), nil +} diff --git a/pkg/client/listers/core/v1alpha1/collectedstatus.go b/pkg/client/listers/core/v1alpha1/collectedstatus.go new file mode 100644 index 00000000..dca31a03 --- /dev/null +++ b/pkg/client/listers/core/v1alpha1/collectedstatus.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CollectedStatusLister helps list CollectedStatuses. +// All objects returned here must be treated as read-only. +type CollectedStatusLister interface { + // List lists all CollectedStatuses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.CollectedStatus, err error) + // CollectedStatuses returns an object that can list and get CollectedStatuses. + CollectedStatuses(namespace string) CollectedStatusNamespaceLister + CollectedStatusListerExpansion +} + +// collectedStatusLister implements the CollectedStatusLister interface. +type collectedStatusLister struct { + indexer cache.Indexer +} + +// NewCollectedStatusLister returns a new CollectedStatusLister. +func NewCollectedStatusLister(indexer cache.Indexer) CollectedStatusLister { + return &collectedStatusLister{indexer: indexer} +} + +// List lists all CollectedStatuses in the indexer. +func (s *collectedStatusLister) List(selector labels.Selector) (ret []*v1alpha1.CollectedStatus, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.CollectedStatus)) + }) + return ret, err +} + +// CollectedStatuses returns an object that can list and get CollectedStatuses. +func (s *collectedStatusLister) CollectedStatuses(namespace string) CollectedStatusNamespaceLister { + return collectedStatusNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CollectedStatusNamespaceLister helps list and get CollectedStatuses. +// All objects returned here must be treated as read-only. +type CollectedStatusNamespaceLister interface { + // List lists all CollectedStatuses in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.CollectedStatus, err error) + // Get retrieves the CollectedStatus from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.CollectedStatus, error) + CollectedStatusNamespaceListerExpansion +} + +// collectedStatusNamespaceLister implements the CollectedStatusNamespaceLister +// interface. +type collectedStatusNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CollectedStatuses in the indexer for a given namespace. 
+func (s collectedStatusNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CollectedStatus, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.CollectedStatus)) + }) + return ret, err +} + +// Get retrieves the CollectedStatus from the indexer for a given namespace and name. +func (s collectedStatusNamespaceLister) Get(name string) (*v1alpha1.CollectedStatus, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("collectedstatus"), name) + } + return obj.(*v1alpha1.CollectedStatus), nil +} diff --git a/pkg/client/listers/core/v1alpha1/expansion_generated.go b/pkg/client/listers/core/v1alpha1/expansion_generated.go index 3227125f..167f089d 100644 --- a/pkg/client/listers/core/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/core/v1alpha1/expansion_generated.go @@ -2,6 +2,14 @@ package v1alpha1 +// ClusterCollectedStatusListerExpansion allows custom methods to be added to +// ClusterCollectedStatusLister. +type ClusterCollectedStatusListerExpansion interface{} + +// ClusterFederatedObjectListerExpansion allows custom methods to be added to +// ClusterFederatedObjectLister. +type ClusterFederatedObjectListerExpansion interface{} + // ClusterOverridePolicyListerExpansion allows custom methods to be added to // ClusterOverridePolicyLister. type ClusterOverridePolicyListerExpansion interface{} @@ -14,10 +22,26 @@ type ClusterPropagatedVersionListerExpansion interface{} // ClusterPropagationPolicyLister. type ClusterPropagationPolicyListerExpansion interface{} +// CollectedStatusListerExpansion allows custom methods to be added to +// CollectedStatusLister. +type CollectedStatusListerExpansion interface{} + +// CollectedStatusNamespaceListerExpansion allows custom methods to be added to +// CollectedStatusNamespaceLister. +type CollectedStatusNamespaceListerExpansion interface{} + // FederatedClusterListerExpansion allows custom methods to be added to // FederatedClusterLister. type FederatedClusterListerExpansion interface{} +// FederatedObjectListerExpansion allows custom methods to be added to +// FederatedObjectLister. +type FederatedObjectListerExpansion interface{} + +// FederatedObjectNamespaceListerExpansion allows custom methods to be added to +// FederatedObjectNamespaceLister. +type FederatedObjectNamespaceListerExpansion interface{} + // FederatedTypeConfigListerExpansion allows custom methods to be added to // FederatedTypeConfigLister. type FederatedTypeConfigListerExpansion interface{} diff --git a/pkg/client/listers/core/v1alpha1/federatedobject.go b/pkg/client/listers/core/v1alpha1/federatedobject.go new file mode 100644 index 00000000..3af305af --- /dev/null +++ b/pkg/client/listers/core/v1alpha1/federatedobject.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// FederatedObjectLister helps list FederatedObjects. +// All objects returned here must be treated as read-only. +type FederatedObjectLister interface { + // List lists all FederatedObjects in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1alpha1.FederatedObject, err error) + // FederatedObjects returns an object that can list and get FederatedObjects. + FederatedObjects(namespace string) FederatedObjectNamespaceLister + FederatedObjectListerExpansion +} + +// federatedObjectLister implements the FederatedObjectLister interface. +type federatedObjectLister struct { + indexer cache.Indexer +} + +// NewFederatedObjectLister returns a new FederatedObjectLister. +func NewFederatedObjectLister(indexer cache.Indexer) FederatedObjectLister { + return &federatedObjectLister{indexer: indexer} +} + +// List lists all FederatedObjects in the indexer. +func (s *federatedObjectLister) List(selector labels.Selector) (ret []*v1alpha1.FederatedObject, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.FederatedObject)) + }) + return ret, err +} + +// FederatedObjects returns an object that can list and get FederatedObjects. +func (s *federatedObjectLister) FederatedObjects(namespace string) FederatedObjectNamespaceLister { + return federatedObjectNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// FederatedObjectNamespaceLister helps list and get FederatedObjects. +// All objects returned here must be treated as read-only. +type FederatedObjectNamespaceLister interface { + // List lists all FederatedObjects in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.FederatedObject, err error) + // Get retrieves the FederatedObject from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.FederatedObject, error) + FederatedObjectNamespaceListerExpansion +} + +// federatedObjectNamespaceLister implements the FederatedObjectNamespaceLister +// interface. +type federatedObjectNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all FederatedObjects in the indexer for a given namespace. +func (s federatedObjectNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.FederatedObject, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.FederatedObject)) + }) + return ret, err +} + +// Get retrieves the FederatedObject from the indexer for a given namespace and name. 
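In controller code the indexer behind these listers is populated by the generated shared informer rather than constructed by hand. A sketch of the usual wiring, reusing the clientset and informer-factory packages imported elsewhere in this patch (the FederatedObjects() accessor follows from the generated informer added above; error handling is trimmed to the essentials):

package example

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
	fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions"
)

func listFederatedObjects(ctx context.Context, client fedclient.Interface) ([]*fedcorev1a1.FederatedObject, error) {
	factory := fedinformers.NewSharedInformerFactory(client, 0)
	informer := factory.Core().V1alpha1().FederatedObjects()

	factory.Start(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), informer.Informer().HasSynced) {
		return nil, fmt.Errorf("caches failed to sync")
	}

	// Served entirely from the informer's local indexer.
	return informer.Lister().FederatedObjects("default").List(labels.Everything())
}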
+func (s federatedObjectNamespaceLister) Get(name string) (*v1alpha1.FederatedObject, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("federatedobject"), name) + } + return obj.(*v1alpha1.FederatedObject), nil +} diff --git a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go index 23e3fed7..c7c7a811 100644 --- a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go +++ b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go @@ -38,7 +38,7 @@ func ConvertSchedulingUnit(su *framework.SchedulingUnit) *schedwebhookv1a1.Sched } } - placements := []fedcorev1a1.Placement{} + placements := []fedcorev1a1.ClusterReference{} for cluster := range su.ClusterNames { var weight *int64 if w, ok := su.Weights[cluster]; ok { @@ -50,7 +50,7 @@ func ConvertSchedulingUnit(su *framework.SchedulingUnit) *schedwebhookv1a1.Sched maxReplicas = &max } - placement := fedcorev1a1.Placement{ + placement := fedcorev1a1.ClusterReference{ Cluster: cluster, Preferences: fedcorev1a1.Preferences{ MinReplicas: su.MinReplicas[cluster], diff --git a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter_test.go b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter_test.go index 6f3010c0..394b6e84 100644 --- a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter_test.go +++ b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter_test.go @@ -155,7 +155,7 @@ func TestConvertSchedulingUnit(t *testing.T) { }, }, MaxClusters: pointer.Int64(5), - Placements: []fedcorev1a1.Placement{ + Placements: []fedcorev1a1.DesiredPlacement{ { Cluster: "cluster1", }, @@ -348,7 +348,7 @@ func TestConvertSchedulingUnit(t *testing.T) { }, }, MaxClusters: pointer.Int64(5), - Placements: []fedcorev1a1.Placement{ + Placements: []fedcorev1a1.DesiredPlacement{ { Cluster: "cluster1", }, diff --git a/pkg/controllers/scheduler/scheduler_test.go b/pkg/controllers/scheduler/scheduler_test.go index 5f2975cb..e6e2c06f 100644 --- a/pkg/controllers/scheduler/scheduler_test.go +++ b/pkg/controllers/scheduler/scheduler_test.go @@ -351,7 +351,7 @@ func TestGetSchedulingUnitWithAnnotationOverrides(t *testing.T) { "label": "value1", }, MaxClusters: pointer.Int64(5), - Placements: []fedcorev1a1.Placement{ + Placements: []fedcorev1a1.ClusterReference{ { Cluster: "cluster1", }, diff --git a/pkg/controllers/scheduler/schedulingunit.go b/pkg/controllers/scheduler/schedulingunit.go index 47414418..06eb401f 100644 --- a/pkg/controllers/scheduler/schedulingunit.go +++ b/pkg/controllers/scheduler/schedulingunit.go @@ -473,7 +473,7 @@ func getWeightsFromObject(object *unstructured.Unstructured) (map[string]int64, return nil, false } - var placements []fedcorev1a1.Placement + var placements []fedcorev1a1.ClusterReference err := json.Unmarshal([]byte(annotation), &placements) if err != nil { klog.Errorf( @@ -531,7 +531,7 @@ func getMinReplicasFromObject(object *unstructured.Unstructured) (map[string]int return nil, false } - var placements []fedcorev1a1.Placement + var placements []fedcorev1a1.ClusterReference err := json.Unmarshal([]byte(annotation), &placements) if err != nil { klog.Errorf( @@ -589,7 +589,7 @@ func getMaxReplicasFromObject(object *unstructured.Unstructured) (map[string]int return nil, false } - var placements []fedcorev1a1.Placement + var placements []fedcorev1a1.ClusterReference err := 
json.Unmarshal([]byte(annotation), &placements) if err != nil { klog.Errorf( @@ -647,7 +647,7 @@ func getClusterNamesFromObject(object *unstructured.Unstructured) (map[string]st return nil, false } - var placements []fedcorev1a1.Placement + var placements []fedcorev1a1.ClusterReference err := json.Unmarshal([]byte(annotation), &placements) if err != nil { klog.Errorf( diff --git a/pkg/controllers/util/placement.go b/pkg/controllers/util/placement.go deleted file mode 100644 index 4dc8fafb..00000000 --- a/pkg/controllers/util/placement.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. -*/ - -package util - -import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -) - -func UnmarshalGenericPlacements(uns *unstructured.Unstructured) (*fedtypesv1a1.GenericObjectWithPlacements, error) { - placements := &fedtypesv1a1.GenericObjectWithPlacements{} - err := UnstructuredToInterface(uns, placements) - if err != nil { - return nil, err - } - return placements, nil -} - -func SetGenericPlacements(uns *unstructured.Unstructured, placements []fedtypesv1a1.PlacementWithController) error { - unsPlacements, err := InterfaceToUnstructured(placements) - if err != nil { - return err - } - - return unstructured.SetNestedField(uns.Object, unsPlacements, common.PlacementsPath...) 
-} - -func SetPlacementClusterNames(uns *unstructured.Unstructured, controller string, clusters map[string]struct{}) (hasChange bool, err error) { - obj, err := UnmarshalGenericPlacements(uns) - if err != nil { - return false, err - } - - if hasChange := obj.Spec.SetPlacementNames(controller, clusters); !hasChange { - return false, nil - } - - return true, SetGenericPlacements(uns, obj.Spec.Placements) -} diff --git a/test/e2e/framework/policies/propagationpolicy.go b/test/e2e/framework/policies/propagationpolicy.go index defa6f7a..22c3986c 100644 --- a/test/e2e/framework/policies/propagationpolicy.go +++ b/test/e2e/framework/policies/propagationpolicy.go @@ -39,12 +39,12 @@ func PropagationPolicyForClustersWithPlacements( Spec: fedcorev1a1.PropagationPolicySpec{ SchedulingMode: fedcorev1a1.SchedulingModeDuplicate, StickyCluster: false, - Placements: []fedcorev1a1.Placement{}, + Placements: []fedcorev1a1.ClusterReference{}, }, } for _, c := range clusters { - policy.Spec.Placements = append(policy.Spec.Placements, fedcorev1a1.Placement{Cluster: c.Name}) + policy.Spec.Placements = append(policy.Spec.Placements, fedcorev1a1.ClusterReference{Cluster: c.Name}) } return policy From 2aca69c0d31976cdafd01d271988dc295a81f670 Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Thu, 6 Jul 2023 16:22:09 +0800 Subject: [PATCH 002/173] generate name for federated object --- pkg/controllers/common/constants.go | 8 +++ pkg/controllers/util/naming.go | 58 ++++++++++++++++- pkg/controllers/util/naming_test.go | 98 +++++++++++++++++++++++++++++ 3 files changed, 163 insertions(+), 1 deletion(-) create mode 100644 pkg/controllers/util/naming_test.go diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index 7fe10566..b278b853 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -165,3 +165,11 @@ var DeploymentGVR = schema.GroupVersionResource{ Version: "v1", Resource: "deployments", } + +// MaxFederatedObjectNameLength defines the max length of a federated object name. +// A custom resource name must be a DNS subdomain as defined in RFC1123 with a maximum length of 253. +// For more information about the custom resource validator, please refer to +// https://github.com/kubernetes/kubernetes/blob/a17149e/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/validator.go#L61 +// +//nolint:lll +const MaxFederatedObjectNameLength = 253 diff --git a/pkg/controllers/util/naming.go b/pkg/controllers/util/naming.go index fe3564af..809ea9f1 100644 --- a/pkg/controllers/util/naming.go +++ b/pkg/controllers/util/naming.go @@ -20,7 +20,12 @@ are Copyright 2023 The KubeAdmiral Authors. package util -import "github.com/kubewharf/kubeadmiral/pkg/controllers/common" +import ( + "fmt" + "hash/fnv" + + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" +) // The functions in this file are exposed as variables to allow them // to be overridden for testing purposes. Simulated scale testing @@ -51,3 +56,54 @@ func qualifiedNameForCluster(clusterName string, qualifiedName common.QualifiedN // QualifiedNameForCluster returns the qualified name to use for the // given cluster. var QualifiedNameForCluster = qualifiedNameForCluster + +// GenerateFederatedObjectName generates a federated object name from source object name and ftc name. 
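Two properties of this scheme are worth calling out before the implementation: a source name that is already a valid DNS subdomain passes through with only the FTC name appended, while any name that had to be rewritten also gains an FNV-32 hash of the original, so a transformed "system:foo" can never collide with a source object literally named "system.foo". A usage sketch, with expected values taken from the test table added at the end of this commit:

package example

import "github.com/kubewharf/kubeadmiral/pkg/controllers/util"

func exampleNames() {
	// A DNS-safe source name passes through with only the FTC name appended.
	a := util.GenerateFederatedObjectName("foo", "roles.rbac.authorization.k8s.io")
	// a == "foo-roles.rbac.authorization.k8s.io"

	// A rewritten name also carries an FNV-32 hash of the original name,
	// keeping it distinct from a source object literally named "system.foo".
	b := util.GenerateFederatedObjectName("system:foo", "roles.rbac.authorization.k8s.io")
	// b == "system.foo-roles.rbac.authorization.k8s.io-2728495308"
	_, _ = a, b
}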
+func GenerateFederatedObjectName(objectName, ftcName string) string { + transformedName, transformed := transformObjectName(objectName) + federatedName := fmt.Sprintf("%s-%s", transformedName, ftcName) + if transformed { + federatedName = fmt.Sprintf("%s-%d", federatedName, fnvHashFunc(objectName)) + } + + if len(federatedName) > common.MaxFederatedObjectNameLength { + nameHash := fmt.Sprint(fnvHashFunc(federatedName)) + federatedName = fmt.Sprintf("%s-%s", federatedName[:common.MaxFederatedObjectNameLength-len(nameHash)-1], nameHash) + } + + return federatedName +} + +// transformObjectName will transform the object name as follows: +// - upper case letters are transformed into lower case letters +// - characters that cannot appear in a DNS subdomain as defined in RFC 1123 are replaced with dots +// For more information about the DNS subdomain name, please refer to +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names. +func transformObjectName(objectName string) (string, bool) { + transformed := false + transformedName := []byte(objectName) + + const caseDiff byte = 'a' - 'A' + + for i, ch := range transformedName { + if ch >= 'a' && ch <= 'z' || ch >= '0' && ch <= '9' || ch == '.' || ch == '-' { + continue + } + + transformed = true + if ch >= 'A' && ch <= 'Z' { + // transform uppercase letters into lowercase + transformedName[i] = caseDiff + ch + } else { + // transform any other illegal characters to dots + transformedName[i] = '.' + } + } + + return string(transformedName), transformed +} + +func fnvHashFunc(key string) uint32 { + hash := fnv.New32() + hash.Write([]byte(key)) + return hash.Sum32() +} diff --git a/pkg/controllers/util/naming_test.go b/pkg/controllers/util/naming_test.go new file mode 100644 index 00000000..abb8f4f8 --- /dev/null +++ b/pkg/controllers/util/naming_test.go @@ -0,0 +1,98 @@ +package util + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGenerateFederatedObjectName(t *testing.T) { + type args struct { + objectName string + ftcName string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "generate federated object name", + args: args{ + objectName: "foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "foo-roles.rbac.authorization.k8s.io", + }, + { + name: "generate federated object name with :", + args: args{ + objectName: "system:foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-2728495308", + }, + { + name: "generate federated object name with $", + args: args{ + objectName: "system$foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-4258037882", + }, + { + name: "generate federated object name with %", + args: args{ + objectName: "system%foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-1244789457", + }, + { + name: "generate federated object name with #", + args: args{ + objectName: "system#foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-1128546011", + }, + { + name: "generate federated object name with upper case letter", + args: args{ + objectName: "system#Foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-1133665787", + }, + { + name: "generate federated object name with number", + args: args{ + objectName: "system.foo123", + 
ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo123-roles.rbac.authorization.k8s.io", + }, + { + name: "generate federated object name for source object with long name", + args: args{ + objectName: strings.Repeat("foo", 80), + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: strings.Repeat("foo", 80) + "-r-3980386512", + }, + { + name: "generate federated object name with transformation and truncation", + args: args{ + objectName: strings.Repeat("system#foo", 25), + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: strings.Repeat("system.foo", 24) + "sys-552681660", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, GenerateFederatedObjectName(tt.args.objectName, tt.args.ftcName), "GenerateFederatedObjectName(%v, %v)", tt.args.ftcName, tt.args.objectName) + }) + } +} From d61d94e04d014c21ef2c76ec5b11c51e1622af15 Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Mon, 10 Jul 2023 15:09:43 +0800 Subject: [PATCH 003/173] refactor/remove ftc manager and ftc controller --- .../app/controllermanager.go | 27 +- cmd/controller-manager/app/core.go | 21 - cmd/controller-manager/app/ftcmanager.go | 207 ---- .../federatedtypeconfig/crd_schema.go | 102 -- .../federatedtypeconfig_controller.go | 902 ------------------ 5 files changed, 1 insertion(+), 1258 deletions(-) delete mode 100644 pkg/controllers/federatedtypeconfig/crd_schema.go delete mode 100644 pkg/controllers/federatedtypeconfig/federatedtypeconfig_controller.go diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index a01a505d..5b4c5cb1 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -28,7 +28,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/healthz" "github.com/kubewharf/kubeadmiral/cmd/controller-manager/app/options" - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllermanager" "github.com/kubewharf/kubeadmiral/pkg/controllermanager/healthcheck" fedleaderelection "github.com/kubewharf/kubeadmiral/pkg/controllermanager/leaderelection" @@ -37,14 +36,12 @@ import ( const ( FederatedClusterControllerName = "cluster" - TypeConfigControllerName = "typeconfig" MonitorControllerName = "monitor" FollowerControllerName = "follower" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ FederatedClusterControllerName: startFederatedClusterController, - TypeConfigControllerName: startTypeConfigController, MonitorControllerName: startMonitorController, FollowerControllerName: startFollowerController, } @@ -77,7 +74,7 @@ func Run(ctx context.Context, opts *options.Options) { defer klog.Infoln("Ready to stop controllers") klog.Infoln("Ready to start controllers") - err := startControllers(ctx, controllerCtx, knownControllers, knownFTCSubControllers, opts.Controllers, healthCheckHandler) + err := startControllers(ctx, controllerCtx, knownControllers, opts.Controllers, healthCheckHandler) if err != nil { klog.Fatalf("Error starting controllers %s: %v", opts.Controllers, err) } @@ -127,7 +124,6 @@ func startControllers( ctx context.Context, controllerCtx *controllercontext.Context, startControllerFuncs map[string]controllermanager.StartControllerFunc, - ftcSubControllerInitFuncs map[string]controllermanager.FTCSubControllerInitFuncs, enabledControllers []string, healthCheckHandler *healthcheck.MutableHealthCheckHandler, ) error { @@ -153,26 +149,5 @@ func 
startControllers( }) } - manager := NewFederatedTypeConfigManager( - controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedTypeConfigs(), - controllerCtx, - healthCheckHandler, - controllerCtx.Metrics, - ) - for controllerName, initFuncs := range ftcSubControllerInitFuncs { - controllerName := controllerName - initFuncs := initFuncs - manager.RegisterSubController(controllerName, initFuncs.StartFunc, func(typeConfig *fedcorev1a1.FederatedTypeConfig) bool { - if !isControllerEnabled(controllerName, controllersDisabledByDefault, enabledControllers) { - return false - } - if initFuncs.IsEnabledFunc != nil { - return initFuncs.IsEnabledFunc(typeConfig) - } - return true - }) - } - go manager.Run(ctx) - return nil } diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index 739d5340..6157efbd 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -58,27 +58,6 @@ func startFederatedClusterController(ctx context.Context, controllerCtx *control return clusterController, nil } -func startTypeConfigController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { - controllerConfig := controllerConfigFromControllerContext(controllerCtx) - //nolint:contextcheck - typeConfigController, err := federatedtypeconfig.NewController( - controllerConfig, - controllerCtx.KubeClientset, - controllerCtx.DynamicClientset, - controllerCtx.FedClientset, - controllerCtx.KubeInformerFactory, - controllerCtx.DynamicInformerFactory, - controllerCtx.FedInformerFactory, - ) - if err != nil { - return nil, fmt.Errorf("error creating type config controller: %w", err) - } - - go typeConfigController.Run(ctx.Done()) - - return typeConfigController, nil -} - func startMonitorController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { controllerConfig := controllerConfigFromControllerContext(controllerCtx) //nolint:contextcheck diff --git a/cmd/controller-manager/app/ftcmanager.go b/cmd/controller-manager/app/ftcmanager.go index e30b5387..95773149 100644 --- a/cmd/controller-manager/app/ftcmanager.go +++ b/cmd/controller-manager/app/ftcmanager.go @@ -17,26 +17,7 @@ limitations under the License. 
package app import ( - "context" - "fmt" - "net/http" - "sync" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllermanager" - "github.com/kubewharf/kubeadmiral/pkg/controllermanager/healthcheck" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" - "github.com/kubewharf/kubeadmiral/pkg/stats" ) const ( @@ -59,191 +40,3 @@ var knownFTCSubControllers = map[string]controllermanager.FTCSubControllerInitFu IsEnabledFunc: isAutoMigrationControllerEnabled, }, } - -type FederatedTypeConfigManager struct { - informer fedcorev1a1informers.FederatedTypeConfigInformer - handle cache.ResourceEventHandlerRegistration - - lock sync.Mutex - registeredSubControllers map[string]controllermanager.StartFTCSubControllerFunc - isSubControllerEnabledFuncs map[string]controllermanager.IsFTCSubControllerEnabledFunc - - subControllerContexts map[string]context.Context - subControllerCancelFuncs map[string]context.CancelFunc - startedSubControllers map[string]sets.Set[string] - - healthCheckHandler *healthcheck.MutableHealthCheckHandler - worker worker.ReconcileWorker - controllerCtx *controllercontext.Context - - metrics stats.Metrics - logger klog.Logger -} - -func NewFederatedTypeConfigManager( - informer fedcorev1a1informers.FederatedTypeConfigInformer, - controllerCtx *controllercontext.Context, - healthCheckHandler *healthcheck.MutableHealthCheckHandler, - metrics stats.Metrics, -) *FederatedTypeConfigManager { - m := &FederatedTypeConfigManager{ - informer: informer, - lock: sync.Mutex{}, - registeredSubControllers: map[string]controllermanager.StartFTCSubControllerFunc{}, - isSubControllerEnabledFuncs: map[string]controllermanager.IsFTCSubControllerEnabledFunc{}, - subControllerContexts: map[string]context.Context{}, - subControllerCancelFuncs: map[string]context.CancelFunc{}, - startedSubControllers: map[string]sets.Set[string]{}, - controllerCtx: controllerCtx, - healthCheckHandler: healthCheckHandler, - metrics: metrics, - logger: klog.LoggerWithValues(klog.Background(), "controller", "federated-type-config-manager"), - } - - m.worker = worker.NewReconcileWorker( - m.reconcile, - worker.WorkerTiming{}, - 1, - metrics, - delayingdeliver.NewMetricTags("federated-type-config-manager", "FederatedTypeConfig"), - ) - - m.handle, _ = informer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(m.worker.EnqueueObject)) - - return m -} - -func (m *FederatedTypeConfigManager) RegisterSubController( - name string, - startFunc controllermanager.StartFTCSubControllerFunc, - isEnabledFunc controllermanager.IsFTCSubControllerEnabledFunc, -) { - m.lock.Lock() - defer m.lock.Unlock() - m.registeredSubControllers[name] = startFunc - m.isSubControllerEnabledFuncs[name] = isEnabledFunc -} - -func (m *FederatedTypeConfigManager) Run(ctx context.Context) { - m.logger.Info("Starting FederatedTypeConfig manager") - defer m.logger.Info("Stopping FederatedTypeConfig manager") - - if !cache.WaitForNamedCacheSync("federated-type-config-manager", ctx.Done(), m.informer.Informer().HasSynced) { - 
return - } - - m.worker.Run(ctx.Done()) - <-ctx.Done() -} - -func (m *FederatedTypeConfigManager) reconcile(qualifiedName common.QualifiedName) (status worker.Result) { - _ = m.metrics.Rate("federated-type-config-manager.throughput", 1) - key := qualifiedName.String() - logger := m.logger.WithValues("federated-type-config", key) - startTime := time.Now() - - logger.V(3).Info("Start reconcile") - defer m.metrics.Duration("federated-type-config-manager.latency", startTime) - defer func() { - logger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcile") - }() - - typeConfig, err := m.informer.Lister().Get(qualifiedName.Name) - if err != nil && apierrors.IsNotFound(err) { - logger.V(3).Info("Observed FederatedTypeConfig deletion") - m.processFTCDeletion(qualifiedName.Name) - return worker.StatusAllOK - } - if err != nil { - logger.Error(err, "Failed to get FederatedTypeConfig") - return worker.StatusError - } - - m.lock.Lock() - defer m.lock.Unlock() - - startedSubControllers, ok := m.startedSubControllers[qualifiedName.Name] - if !ok { - startedSubControllers = sets.New[string]() - m.startedSubControllers[qualifiedName.Name] = startedSubControllers - } - subControllerCtx, ok := m.subControllerContexts[qualifiedName.Name] - if !ok { - subControllerCtx, m.subControllerCancelFuncs[qualifiedName.Name] = context.WithCancel(context.TODO()) - m.subControllerContexts[qualifiedName.Name] = subControllerCtx - } - - needRetry := false - for controllerName, startFunc := range m.registeredSubControllers { - logger := logger.WithValues("subcontroller", controllerName) - - if startedSubControllers.Has(controllerName) { - logger.V(3).Info("Subcontroller already started") - continue - } - - isEnabledFunc := m.isSubControllerEnabledFuncs[controllerName] - if isEnabledFunc != nil && !isEnabledFunc(typeConfig) { - logger.V(3).Info("Skip starting subcontroller, is disabled") - continue - } - - controller, err := startFunc(subControllerCtx, m.controllerCtx, typeConfig) - if err != nil { - logger.Error(err, "Failed to start subcontroller") - needRetry = true - continue - } else { - logger.Info("Started subcontroller") - startedSubControllers.Insert(controllerName) - } - - m.healthCheckHandler.AddReadyzChecker( - resolveSubcontrollerName(controllerName, qualifiedName.Name), - func(_ *http.Request) error { - if controller.IsControllerReady() { - return nil - } - return fmt.Errorf("controller not ready") - }, - ) - } - - // Since the controllers are created dynamically, we have to start the informer factories again, in case any new - // informers were accessed. Note that a different context is used in case a FTC is recreated and the same informer - // needs to be used again (SharedInformerFactory and SharedInformers do not support restarts). 
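The restart caveat above is a general client-go property worth making explicit: SharedInformerFactory.Start is additive and idempotent, so each call starts only the informers requested since the previous call, but once the stop channel it was given closes, its informers terminate and the factory cannot be started again. A minimal illustration against a plain kubernetes clientset (resource choices illustrative):

package example

import (
	"context"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func startIncrementally(ctx context.Context, client kubernetes.Interface) {
	factory := informers.NewSharedInformerFactory(client, 0)

	pods := factory.Core().V1().Pods().Informer()
	factory.Start(ctx.Done()) // starts the pods informer

	// An informer requested after Start stays idle until Start runs again;
	// the second call launches only the not-yet-started informers.
	configMaps := factory.Core().V1().ConfigMaps().Informer()
	factory.Start(ctx.Done())

	cache.WaitForCacheSync(ctx.Done(), pods.HasSynced, configMaps.HasSynced)
}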
- ctx := context.TODO() - m.controllerCtx.KubeInformerFactory.Start(ctx.Done()) - m.controllerCtx.DynamicInformerFactory.Start(ctx.Done()) - m.controllerCtx.FedInformerFactory.Start(ctx.Done()) - - if needRetry { - return worker.StatusError - } - - return worker.StatusAllOK -} - -func (m *FederatedTypeConfigManager) processFTCDeletion(ftcName string) { - m.lock.Lock() - defer m.lock.Unlock() - - cancel, ok := m.subControllerCancelFuncs[ftcName] - if !ok { - return - } - - cancel() - - for controller := range m.startedSubControllers[ftcName] { - m.healthCheckHandler.RemoveReadyzChecker(resolveSubcontrollerName(controller, ftcName)) - } - - delete(m.subControllerCancelFuncs, ftcName) - delete(m.subControllerContexts, ftcName) - delete(m.startedSubControllers, ftcName) -} - -func resolveSubcontrollerName(baseName, ftcName string) string { - return fmt.Sprintf("%s[%s]", ftcName, baseName) -} diff --git a/pkg/controllers/federatedtypeconfig/crd_schema.go b/pkg/controllers/federatedtypeconfig/crd_schema.go deleted file mode 100644 index 31b26098..00000000 --- a/pkg/controllers/federatedtypeconfig/crd_schema.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federatedtypeconfig - -import ( - "bytes" - "strings" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/util/yaml" -) - -const fedObjectSchemaYaml = ` -openAPIV3Schema: - type: object - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - type: object - x-kubernetes-preserve-unknown-fields: true - status: - type: object - properties: - syncedGeneration: - format: int64 - type: integer - clusters: - type: array - items: - type: object - properties: - generation: - type: integer - name: - type: string - status: - type: string - required: [name] - conditions: - type: array - items: - type: object - properties: - lastTransitionTime: - type: string - format: date-time - lastUpdateTime: - type: string - format: date-time - reason: - type: string - status: - type: string - type: - type: string - required: [type, status] - required: [spec] - x-kubernetes-preserve-unknown-fields: true -` - -const statusObjectSchemaYaml = ` -openAPIV3Schema: - type: object - x-kubernetes-preserve-unknown-fields: true -` - -var fedObjectSchema, statusObjectSchema apiextensionsv1.CustomResourceValidation - -func init() { - if err := yaml.NewYAMLOrJSONDecoder( - bytes.NewReader([]byte(strings.Replace(fedObjectSchemaYaml, "\t", " ", -1))), - len(fedObjectSchemaYaml), - ).Decode(&fedObjectSchema); err != nil { - panic(err) - } - - if err := yaml.NewYAMLOrJSONDecoder( - bytes.NewReader([]byte(strings.Replace(statusObjectSchemaYaml, "\t", " ", -1))), - len(statusObjectSchemaYaml), - ).Decode(&statusObjectSchema); err != nil { - panic(err) - } -} diff --git a/pkg/controllers/federatedtypeconfig/federatedtypeconfig_controller.go b/pkg/controllers/federatedtypeconfig/federatedtypeconfig_controller.go deleted 
file mode 100644 index 0cb4f07b..00000000 --- a/pkg/controllers/federatedtypeconfig/federatedtypeconfig_controller.go +++ /dev/null @@ -1,902 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. -*/ - -package federatedtypeconfig - -import ( - "context" - "fmt" - "strings" - "sync" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - typedapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" - fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" - "github.com/kubewharf/kubeadmiral/pkg/controllers/override" - "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" - statuscontroller "github.com/kubewharf/kubeadmiral/pkg/controllers/status" - "github.com/kubewharf/kubeadmiral/pkg/controllers/statusaggregator" - synccontroller "github.com/kubewharf/kubeadmiral/pkg/controllers/sync" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" -) - -var mutex = sync.Mutex{} - -var finalizer string = "core." + common.DefaultPrefix + "federated-type-config" - -const ControllerName = "federated-type-config" - -// The FederatedTypeConfig controller configures sync and status -// controllers in response to FederatedTypeConfig resources in the -// KubeFed system namespace. 
-type Controller struct { - // Arguments to use when starting new controllers - controllerConfig *util.ControllerConfig - - kubeClient kubernetes.Interface - fedClient fedclient.Interface - dynamicClient dynamic.Interface - - discoveryClient discovery.DiscoveryInterface - crdClient typedapiextensionsv1.CustomResourceDefinitionInterface - - kubeInformerFactory informers.SharedInformerFactory - dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory - fedInformerFactory fedinformers.SharedInformerFactory - - // Map of running sync controllers keyed by qualified target type - stopChannels map[string]chan struct{} - lock sync.RWMutex - - // Store for the FederatedTypeConfig objects - ftcStore cache.Store - // Informer for the FederatedTypeConfig objects - ftcController cache.Controller - - worker worker.ReconcileWorker - - controllerRevisionStore cache.Store - controllerRevisionController cache.Controller - isControllerRevisionExists bool -} - -func (c *Controller) IsControllerReady() bool { - return c.HasSynced() -} - -// NewController returns a new controller to manage FederatedTypeConfig objects. -func NewController( - config *util.ControllerConfig, - kubeClient kubernetes.Interface, - dynamicClient dynamic.Interface, - fedClient fedclient.Interface, - kubeInformerFactory informers.SharedInformerFactory, - dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, - fedInformerFactory fedinformers.SharedInformerFactory, -) (*Controller, error) { - userAgent := "FederatedTypeConfig" - kubeConfig := restclient.CopyConfig(config.KubeConfig) - restclient.AddUserAgent(kubeConfig, userAgent) - - extClient, err := apiextensions.NewForConfig(kubeConfig) - if err != nil { - return nil, err - } - - c := &Controller{ - controllerConfig: config, - fedClient: fedClient, - kubeClient: kubeClient, - dynamicClient: dynamicClient, - discoveryClient: kubeClient.Discovery(), - crdClient: extClient.ApiextensionsV1().CustomResourceDefinitions(), - kubeInformerFactory: kubeInformerFactory, - dynamicInformerFactory: dynamicInformerFactory, - fedInformerFactory: fedInformerFactory, - stopChannels: make(map[string]chan struct{}), - } - - c.worker = worker.NewReconcileWorker(c.reconcile, worker.WorkerTiming{}, 1, config.Metrics, - delayingdeliver.NewMetricTags("typeconfig-worker", "FederatedTypeConfig")) - - c.ftcStore, c.ftcController, err = util.NewGenericInformer( - kubeConfig, - "", - &fedcorev1a1.FederatedTypeConfig{}, - util.NoResyncPeriod, - c.worker.EnqueueObject, - config.Metrics, - ) - if err != nil { - return nil, err - } - - return c, nil -} - -func (c *Controller) HasSynced() bool { - if !c.ftcController.HasSynced() { - klog.V(2).Infof("typeconfig controller's controller hasn't synced") - return false - } - return true -} - -// Run runs the Controller. 
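The lifecycle bookkeeping in the controller being deleted here reduces to a small, reusable pattern: one stop channel per dynamically started sub-controller, stored under a key and closed to tear that controller down. Stripped of the sync/status specifics, a hedged sketch of the core (not the original implementation):

package example

import "sync"

type controllerRunner struct {
	lock  sync.Mutex
	stops map[string]chan struct{}
}

func newControllerRunner() *controllerRunner {
	return &controllerRunner{stops: map[string]chan struct{}{}}
}

func (r *controllerRunner) start(key string, run func(stop <-chan struct{})) {
	r.lock.Lock()
	defer r.lock.Unlock()
	if _, running := r.stops[key]; running {
		return // idempotent: already started
	}
	stop := make(chan struct{})
	r.stops[key] = stop
	go run(stop) // the sub-controller exits when stop is closed
}

func (r *controllerRunner) stop(key string) {
	r.lock.Lock()
	defer r.lock.Unlock()
	if stop, ok := r.stops[key]; ok {
		close(stop)
		delete(r.stops, key)
	}
}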
-func (c *Controller) Run(stopChan <-chan struct{}) { - klog.Infof("Starting FederatedTypeConfig controller") - go c.ftcController.Run(stopChan) - - // wait for the caches to synchronize before starting the worker - if !cache.WaitForNamedCacheSync("type-config-controller", stopChan, c.ftcController.HasSynced) { - return - } - - var nsFTC *fedcorev1a1.FederatedTypeConfig - ftcs := c.ftcStore.List() - for _, obj := range ftcs { - ftc := obj.(*fedcorev1a1.FederatedTypeConfig) - if ftc.Name == common.NamespaceResource && ftc.GetTargetType().Kind == common.NamespaceKind && - ftc.GetSourceType().Kind == common.NamespaceKind { - nsFTC = ftc - break - } - } - if nsFTC == nil { - // panic if ftc for namespace does not exist, since no other resource can be synced otherwise - klog.Fatal("FederatedTypeConfig for namespaces not found") - } - - // ensure the federated namespace since it is a requirement for other controllers - if err := c.ensureFederatedObjectCrd(nsFTC); err != nil { - klog.Fatalf("Failed to ensure FederatedNamespace CRD: %v", err) - } - - c.worker.Run(stopChan) - - // Ensure all goroutines are cleaned up when the stop channel closes - go func() { - <-stopChan - c.shutDown() - }() -} - -func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result { - key := qualifiedName.String() - - klog.V(3).Infof("Running reconcile FederatedTypeConfig for %q", key) - - cachedObj, err := c.objCopyFromCache(key) - if err != nil { - return worker.StatusError - } - - if cachedObj == nil { - return worker.StatusAllOK - } - typeConfig := cachedObj.(*fedcorev1a1.FederatedTypeConfig) - - // TODO(marun) Perform this defaulting in a webhook - SetFederatedTypeConfigDefaults(typeConfig) - - deleted := typeConfig.DeletionTimestamp != nil - if c.controllerConfig.CreateCrdForFtcs && !deleted { - if err := c.ensureFederatedObjectCrd(typeConfig); err != nil { - klog.Error(fmt.Errorf("cannot ensure federated object CRD for %q: %w", typeConfig.Name, err)) - return worker.StatusError - } - } - - syncEnabled := typeConfig.GetPropagationEnabled() - statusEnabled := typeConfig.GetStatusEnabled() - statusAggregationEnabled := typeConfig.GetStatusAggregationEnabled() - policyRcEnabled := typeConfig.GetPolicyRcEnabled() - controllers := sets.New[string]() - for _, controllerGroup := range typeConfig.GetControllers() { - for _, controller := range controllerGroup { - controllers.Insert(controller) - } - } - namespaceAutoPropagationEnabled := controllers.Has(nsautoprop.PrefixedNamespaceAutoPropagationControllerName) - overridePolicyEnabled := controllers.Has(override.PrefixedControllerName) - - limitedScope := c.controllerConfig.TargetNamespace != metav1.NamespaceAll - if limitedScope && syncEnabled && !typeConfig.GetNamespaced() { - _, ok := c.getStopChannel(typeConfig.Name) - if !ok { - holderChan := make(chan struct{}) - c.lock.Lock() - c.stopChannels[typeConfig.Name] = holderChan - c.lock.Unlock() - klog.Infof( - "Skipping start of sync & status controller for cluster-scoped resource %q. 
It is not required for a namespaced KubeFed control plane.", - typeConfig.GetFederatedType().Kind, - ) - } - - // typeConfig.Status.ObservedGeneration = typeConfig.Generation - // typeConfig.Status.PropagationController = corev1a1.ControllerStatusNotRunning - - /*if typeConfig.Status.StatusController == nil { - typeConfig.Status.StatusController = new(corev1a1.ControllerStatus) - } - *typeConfig.Status.StatusController = corev1a1.ControllerStatusNotRunning - err = c.client.UpdateStatus(context.TODO(), typeConfig) - if err != nil { - runtime.HandleError(errors.Wrapf(err, "Could not update status fields of the CRD: %q", key)) - return worker.StatusError - }*/ - return worker.StatusAllOK - } - - statusKey := typeConfig.Name + "/status" - statusAggregationKey := typeConfig.Name + "/statusAggregation" - policyRcKey := typeConfig.Name + "/policyRc" - federateKey := typeConfig.Name + "/federate" - schedulerKey := typeConfig.Name + "/scheduler" - namespaceAutoPropagationKey := typeConfig.Name + "/namespaceAutoPropagation" - overridePolicyKey := typeConfig.Name + "/overridePolicy" - - syncStopChan, syncRunning := c.getStopChannel(typeConfig.Name) - statusStopChan, statusRunning := c.getStopChannel(statusKey) - statusAggregationStopChan, statusAggregationRunning := c.getStopChannel(statusAggregationKey) - policyRcStopChan, policyRcRunning := c.getStopChannel(policyRcKey) - federateStopChan, federateRunning := c.getStopChannel(federateKey) - schedulerStopChan, schedulerRunning := c.getStopChannel(schedulerKey) - namespaceAutoPropagationStopChan, namespaceAutoPropagationRunning := c.getStopChannel(namespaceAutoPropagationKey) - overridePolicyStopChan, overridePolicyRunning := c.getStopChannel(overridePolicyKey) - - if deleted { - if syncRunning { - c.stopController(typeConfig.Name, syncStopChan) - } - if statusRunning { - c.stopController(statusKey, statusStopChan) - } - if federateRunning { - c.stopController(federateKey, federateStopChan) - } - if schedulerRunning { - c.stopController(schedulerKey, schedulerStopChan) - } - if overridePolicyRunning { - c.stopController(overridePolicyKey, overridePolicyStopChan) - } - - if typeConfig.IsNamespace() { - if namespaceAutoPropagationRunning { - c.stopController(namespaceAutoPropagationKey, namespaceAutoPropagationStopChan) - } - klog.Infof("Reconciling all namespaced FederatedTypeConfig resources on deletion of %q", key) - c.reconcileOnNamespaceFTCUpdate() - } - - err := c.removeFinalizer(typeConfig) - if err != nil { - klog.Error(errors.Wrapf(err, "Failed to remove finalizer from FederatedTypeConfig %q", key)) - return worker.StatusError - } - return worker.StatusAllOK - } - - typeConfig, updated, err := c.ensureFinalizer(typeConfig) - if err != nil { - klog.Error(errors.Wrapf(err, "Failed to ensure finalizer for FederatedTypeConfig %q", key)) - return worker.StatusError - } else if updated && typeConfig.IsNamespace() { - // Detected creation of the namespace FTC. If there are existing FTCs - // which did not start their sync controllers due to the lack of a - // namespace FTC, then reconcile them now so they can start. 
- klog.Infof("Reconciling all namespaced FederatedTypeConfig resources on finalizer update for %q", key) - c.reconcileOnNamespaceFTCUpdate() - } - - startNewSyncController := !syncRunning && syncEnabled - stopSyncController := syncRunning && (!syncEnabled || (typeConfig.GetNamespaced() && !c.namespaceFTCExists())) - if startNewSyncController { - if err := c.startSyncController(typeConfig); err != nil { - klog.Error(err) - return worker.StatusError - } - } else if stopSyncController { - c.stopController(typeConfig.Name, syncStopChan) - } - - startNewStatusController := !statusRunning && statusEnabled - stopStatusController := statusRunning && !statusEnabled - if startNewStatusController { - if err := c.startStatusController(statusKey, typeConfig); err != nil { - klog.Error(err) - return worker.StatusError - } - } else if stopStatusController { - c.stopController(statusKey, statusStopChan) - } - - startNewStatusAggregationController := !statusAggregationRunning && statusAggregationEnabled - stopStatusAggregationController := statusAggregationRunning && !statusAggregationEnabled - if startNewStatusAggregationController { - if err := c.startStatusAggregationController(statusAggregationKey, typeConfig); err != nil { - klog.Error(err) - return worker.StatusError - } - } else if stopStatusAggregationController { - c.stopController(statusAggregationKey, statusAggregationStopChan) - } - - startNewPolicyRcController := !policyRcRunning && policyRcEnabled - stopPolicyRcController := policyRcRunning && !policyRcEnabled - if startNewPolicyRcController { - if err := c.startPolicyRcController(policyRcKey, typeConfig); err != nil { - klog.Error(err) - return worker.StatusError - } - } else if stopPolicyRcController { - c.stopController(policyRcKey, policyRcStopChan) - } - - startNewNamespaceAutoPropagationController := !namespaceAutoPropagationRunning && typeConfig.IsNamespace() && - namespaceAutoPropagationEnabled - stopNamespaceAutoPropagationController := namespaceAutoPropagationRunning && - (!typeConfig.IsNamespace() || !namespaceAutoPropagationEnabled) - if startNewNamespaceAutoPropagationController { - if err := c.startNamespaceAutoPropagationController(namespaceAutoPropagationKey, typeConfig); err != nil { - klog.Error(err) - return worker.StatusError - } - } else if stopNamespaceAutoPropagationController { - c.stopController(namespaceAutoPropagationKey, namespaceAutoPropagationStopChan) - } - - startOverridePolicyController := !overridePolicyRunning && overridePolicyEnabled - stopOverridePolicyController := overridePolicyRunning && !overridePolicyEnabled - if startOverridePolicyController { - if err := c.startOverridePolicyController(overridePolicyKey, typeConfig); err != nil { - klog.Error(err) - return worker.StatusError - } - } else if stopOverridePolicyController { - c.stopController(overridePolicyKey, overridePolicyStopChan) - } - - if !startNewSyncController && !stopSyncController && - typeConfig.Status.ObservedGeneration != typeConfig.Generation { - if err := c.refreshSyncController(typeConfig); err != nil { - klog.Error(err) - return worker.StatusError - } - } - - typeConfig.Status.ObservedGeneration = typeConfig.Generation - /*syncControllerRunning := startNewSyncController || (syncRunning && !stopSyncController) - if syncControllerRunning { - typeConfig.Status.PropagationController = corev1a1.ControllerStatusRunning - } else { - typeConfig.Status.PropagationController = corev1a1.ControllerStatusNotRunning - } - - if typeConfig.Status.StatusController == nil { - 
typeConfig.Status.StatusController = new(corev1a1.ControllerStatus) - } - - statusControllerRunning := startNewStatusController || (statusRunning && !stopStatusController) - if statusControllerRunning { - *typeConfig.Status.StatusController = corev1a1.ControllerStatusRunning - } else { - *typeConfig.Status.StatusController = corev1a1.ControllerStatusNotRunning - } - err = c.client.UpdateStatus(context.TODO(), typeConfig) - if err != nil { - runtime.HandleError(errors.Wrapf(err, "Could not update status fields of the CRD: %q", key)) - return worker.StatusError - }*/ - return worker.StatusAllOK -} - -func (c *Controller) ensureFederatedObjectCrd(ftc *fedcorev1a1.FederatedTypeConfig) error { - fedTy := ftc.Spec.FederatedType - crdName := fedTy.PluralName - if fedTy.Group != "" { - crdName += "." - crdName += fedTy.Group - } - - _, err := c.crdClient.Get(context.TODO(), crdName, metav1.GetOptions{ResourceVersion: "0"}) - if err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("cannot check for existence of CRD %q: %w", crdName, err) - } - - needObjectCrd := err != nil - - needStatusCrd := false - statusTy := ftc.Spec.StatusType - var statusCrdName string - if statusTy != nil { - statusCrdName = statusTy.PluralName - if statusTy.Group != "" { - statusCrdName += "." - statusCrdName += statusTy.Group - } - - _, err = c.crdClient.Get(context.TODO(), statusCrdName, metav1.GetOptions{ResourceVersion: "0"}) - if err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("cannot check for existence of CRD %q: %w", statusCrdName, err) - } - - needStatusCrd = err != nil - } - - var sourceResource *metav1.APIResource - if ftc.Spec.SourceType != nil { - srcTy := ftc.Spec.SourceType - - resourceList, err := c.discoveryClient.ServerResourcesForGroupVersion(schema.GroupVersion{ - Group: srcTy.Group, - Version: srcTy.Version, - }.String()) - if err != nil { - return fmt.Errorf("cannot invoke discovery client: %w", err) - } - - for _, resource := range resourceList.APIResources { - // we don't care about resource.Group because subresources are not supported - - if resource.Name == srcTy.PluralName { - resource := resource - sourceResource = &resource - break - } - } - } - - // create CRD now - if needObjectCrd { - klog.V(2).Infof("Creating federated CRD for %q", ftc.Name) - - fedShortNames := []string{"f" + strings.ToLower(ftc.Spec.TargetType.Kind)} - if sourceResource != nil { - for _, shortName := range sourceResource.ShortNames { - fedShortNames = append(fedShortNames, "f"+shortName) - } - } - - crd := &apiextensionsv1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: crdName, - }, - Spec: apiextensionsv1.CustomResourceDefinitionSpec{ - Group: fedTy.Group, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: fedTy.PluralName, - Kind: fedTy.Kind, - Singular: strings.ToLower(fedTy.Kind), - ShortNames: fedShortNames, - ListKind: fedTy.Kind + "List", - }, - Scope: apiextensionsv1.ResourceScope(fedTy.Scope), - Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - { - Name: fedTy.Version, - Served: true, - Storage: true, - Subresources: &apiextensionsv1.CustomResourceSubresources{ - Status: &apiextensionsv1.CustomResourceSubresourceStatus{}, - }, - Schema: &fedObjectSchema, - }, - }, - }, - } - _, err = c.crdClient.Create(context.TODO(), crd, metav1.CreateOptions{}) - if err != nil { - return err - } - } - - if needStatusCrd { - klog.V(2).Infof("Creating status CRD for %q", ftc.Name) - - statusShortNames := []string{fmt.Sprintf("f%sstatus", 
strings.ToLower(ftc.Spec.TargetType.Kind))} - if sourceResource != nil { - for _, shortName := range sourceResource.ShortNames { - statusShortNames = append(statusShortNames, fmt.Sprintf("f%sstatus", shortName)) - } - } - - crd := &apiextensionsv1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: statusCrdName, - }, - Spec: apiextensionsv1.CustomResourceDefinitionSpec{ - Group: statusTy.Group, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: statusTy.PluralName, - Kind: statusTy.Kind, - Singular: strings.ToLower(statusTy.Kind), - ShortNames: statusShortNames, - ListKind: statusTy.Kind + "List", - }, - Scope: apiextensionsv1.ResourceScope(statusTy.Scope), - Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - { - Name: statusTy.Version, - Served: true, - Storage: true, - Subresources: &apiextensionsv1.CustomResourceSubresources{ - Status: &apiextensionsv1.CustomResourceSubresourceStatus{}, - }, - Schema: &statusObjectSchema, - }, - }, - }, - } - _, err = c.crdClient.Create(context.TODO(), crd, metav1.CreateOptions{}) - if err != nil { - return err - } - } - - return nil -} - -func (c *Controller) objCopyFromCache(key string) (pkgruntime.Object, error) { - cachedObj, exist, err := c.ftcStore.GetByKey(key) - if err != nil { - wrappedErr := errors.Wrapf(err, "Failed to query FederatedTypeConfig store for %q", key) - klog.Error(wrappedErr) - return nil, err - } - if !exist { - return nil, nil - } - return cachedObj.(pkgruntime.Object).DeepCopyObject(), nil -} - -func (c *Controller) shutDown() { - c.lock.Lock() - defer c.lock.Unlock() - - // Stop all sync and status controllers - for key, stopChannel := range c.stopChannels { - close(stopChannel) - delete(c.stopChannels, key) - } -} - -func (c *Controller) getStopChannel(name string) (chan struct{}, bool) { - c.lock.RLock() - defer c.lock.RUnlock() - stopChan, ok := c.stopChannels[name] - return stopChan, ok -} - -func (c *Controller) startSyncController(tc *fedcorev1a1.FederatedTypeConfig) error { - ftc := tc.DeepCopyObject().(*fedcorev1a1.FederatedTypeConfig) - kind := ftc.Spec.FederatedType.Kind - controllerConfig := new(util.ControllerConfig) - *controllerConfig = *(c.controllerConfig) - - // A sync controller for a namespaced resource must be supplied - // with the ftc for namespaces so that it can consider federated - // namespace placement when determining the placement for - // contained resources. 
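The namespace dependency spelled out in the comment above has a simple semantics: a namespaced resource can only be propagated to clusters where its containing federated namespace is also placed, which effectively intersects the two placement sets. A hedged sketch of that computation (the helper name and set shapes are illustrative, not the sync controller's actual code):

package example

// effectiveClusters restricts a namespaced resource's placement to the
// clusters where its federated namespace is also placed.
func effectiveClusters(resourcePlacement, namespacePlacement map[string]struct{}) map[string]struct{} {
	out := make(map[string]struct{}, len(resourcePlacement))
	for cluster := range resourcePlacement {
		if _, ok := namespacePlacement[cluster]; ok {
			out[cluster] = struct{}{}
		}
	}
	return out
}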
- var fedNamespaceAPIResource *metav1.APIResource - if ftc.GetNamespaced() { - var err error - fedNamespaceAPIResource, err = c.getFederatedNamespaceAPIResource() - if err != nil { - return errors.Wrapf( - err, - "Unable to start sync controller for %q due to missing FederatedTypeConfig for namespaces", - kind, - ) - } - } - - mutex.Lock() - defer mutex.Unlock() - stopChan := make(chan struct{}) - - if !c.isControllerRevisionExists { - controllerRevision := util.GetResourceKind(&appsv1.ControllerRevision{}) - controllerRevisionsResource := &metav1.APIResource{ - Name: util.GetPluralName(controllerRevision), - Group: appsv1.SchemeGroupVersion.Group, - Version: appsv1.SchemeGroupVersion.Version, - Kind: controllerRevision, - Namespaced: true, - } - userAgent := "controller-revision-federate-sync-controller" - configWithUserAgent := restclient.CopyConfig(controllerConfig.KubeConfig) - restclient.AddUserAgent(configWithUserAgent, userAgent) - controllerRevisionClient, err := util.NewResourceClient(configWithUserAgent, controllerRevisionsResource) - if err != nil { - klog.Errorf("Failed to initiate controller revision client.") - return err - } - - triggerFunc := func(obj pkgruntime.Object) { - if accessor, err := meta.Accessor(obj); err == nil { - klog.V(4). - Infof("ControllerRevision changement observed: %s/%s", accessor.GetNamespace(), accessor.GetName()) - } - } - c.controllerRevisionStore, c.controllerRevisionController = util.NewResourceInformer( - controllerRevisionClient, - "", - triggerFunc, - c.controllerConfig.Metrics, - ) - c.isControllerRevisionExists = true - go c.controllerRevisionController.Run(stopChan) - } - - err := synccontroller.StartSyncController( - controllerConfig, - stopChan, - ftc, - fedNamespaceAPIResource, - c.controllerRevisionStore, - c.controllerRevisionController) - if err != nil { - close(stopChan) - return errors.Wrapf(err, "Error starting sync controller for %q", kind) - } - klog.Infof("Started sync controller for %q", kind) - c.lock.Lock() - defer c.lock.Unlock() - c.stopChannels[ftc.Name] = stopChan - return nil -} - -func (c *Controller) startStatusController(statusKey string, tc *fedcorev1a1.FederatedTypeConfig) error { - kind := tc.Spec.FederatedType.Kind - stopChan := make(chan struct{}) - ftc := tc.DeepCopyObject().(*fedcorev1a1.FederatedTypeConfig) - err := statuscontroller.StartStatusController(c.controllerConfig, stopChan, ftc) - if err != nil { - close(stopChan) - return errors.Wrapf(err, "Error starting status controller for %q", kind) - } - klog.Infof("Started status controller for %q", kind) - c.lock.Lock() - defer c.lock.Unlock() - c.stopChannels[statusKey] = stopChan - return nil -} - -func (c *Controller) startStatusAggregationController( - statusAggregationKey string, - tc *fedcorev1a1.FederatedTypeConfig, -) error { - kind := tc.Spec.FederatedType.Kind - stopChan := make(chan struct{}) - ftc := tc.DeepCopyObject().(*fedcorev1a1.FederatedTypeConfig) - err := statusaggregator.StartStatusAggregator(c.controllerConfig, stopChan, ftc) - if err != nil { - close(stopChan) - return errors.Wrapf(err, "Error starting status aggregator for %q", kind) - } - klog.Infof("Started status aggregator for %q", kind) - c.lock.Lock() - defer c.lock.Unlock() - c.stopChannels[statusAggregationKey] = stopChan - return nil -} - -func (c *Controller) startPolicyRcController(policyRcKey string, tc *fedcorev1a1.FederatedTypeConfig) error { - kind := tc.Spec.FederatedType.Kind - stopChan := make(chan struct{}) - ftc := 
tc.DeepCopyObject().(*fedcorev1a1.FederatedTypeConfig) - err := policyrc.StartController(c.controllerConfig, stopChan, ftc) - if err != nil { - close(stopChan) - return errors.Wrapf(err, "Error starting policy-rc controller for %q", kind) - } - klog.Infof("Started policy-rc controller for %q", kind) - c.lock.Lock() - defer c.lock.Unlock() - c.stopChannels[policyRcKey] = stopChan - return nil -} - -func (c *Controller) startNamespaceAutoPropagationController( - namespaceAutoPropagationKey string, - tc *fedcorev1a1.FederatedTypeConfig, -) error { - kind := tc.Spec.FederatedType.Kind - stopChan := make(chan struct{}) - ftc := tc.DeepCopyObject().(*fedcorev1a1.FederatedTypeConfig) - err := nsautoprop.StartController( - c.controllerConfig, - stopChan, - ftc, - c.kubeClient, - c.dynamicClient, - c.dynamicInformerFactory, - c.fedInformerFactory, - ) - if err != nil { - close(stopChan) - return errors.Wrapf(err, "Error starting namespace-auto-propagation controller for %q", kind) - } - klog.Infof("Started namespace-auto-propagation controller for %q", kind) - c.lock.Lock() - defer c.lock.Unlock() - c.stopChannels[namespaceAutoPropagationKey] = stopChan - return nil -} - -func (c *Controller) startOverridePolicyController( - overridePolicyKey string, - tc *fedcorev1a1.FederatedTypeConfig, -) error { - kind := tc.Spec.FederatedType.Kind - stopChan := make(chan struct{}) - if err := override.StartController(c.controllerConfig, stopChan, tc); err != nil { - close(stopChan) - return errors.Wrapf(err, "Error starting overridepolicy-controller for %q", kind) - } - klog.Infof("Started overridepolicy-controller for %q", kind) - c.lock.Lock() - defer c.lock.Unlock() - c.stopChannels[overridePolicyKey] = stopChan - return nil -} - -func (c *Controller) stopController(key string, stopChan chan struct{}) { - klog.Infof("Stopping controller for %q", key) - close(stopChan) - c.lock.Lock() - defer c.lock.Unlock() - delete(c.stopChannels, key) -} - -func (c *Controller) refreshSyncController(tc *fedcorev1a1.FederatedTypeConfig) error { - klog.Infof("refreshing sync controller for %q", tc.Name) - - syncStopChan, ok := c.getStopChannel(tc.Name) - if ok { - c.stopController(tc.Name, syncStopChan) - } - - return c.startSyncController(tc) -} - -func (c *Controller) ensureFinalizer( - tc *fedcorev1a1.FederatedTypeConfig, -) (*fedcorev1a1.FederatedTypeConfig, bool, error) { - accessor, err := meta.Accessor(tc) - if err != nil { - return nil, false, err - } - finalizers := sets.NewString(accessor.GetFinalizers()...) - if finalizers.Has(finalizer) { - return tc, false, nil - } - finalizers.Insert(finalizer) - accessor.SetFinalizers(finalizers.List()) - tc, err = c.fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(context.TODO(), tc, metav1.UpdateOptions{}) - return tc, true, err -} - -func (c *Controller) removeFinalizer(tc *fedcorev1a1.FederatedTypeConfig) error { - accessor, err := meta.Accessor(tc) - if err != nil { - return err - } - finalizers := sets.NewString(accessor.GetFinalizers()...) 
- if !finalizers.Has(finalizer) { - return nil - } - finalizers.Delete(finalizer) - accessor.SetFinalizers(finalizers.List()) - _, err = c.fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(context.TODO(), tc, metav1.UpdateOptions{}) - return err -} - -func (c *Controller) namespaceFTCExists() bool { - _, err := c.getFederatedNamespaceAPIResource() - return err == nil -} - -func (c *Controller) getFederatedNamespaceAPIResource() (*metav1.APIResource, error) { - qualifiedName := common.QualifiedName{ - Namespace: "", - Name: common.NamespaceResource, - } - key := qualifiedName.String() - cachedObj, exists, err := c.ftcStore.GetByKey(key) - if err != nil { - return nil, errors.Wrapf(err, "Error retrieving %q from the informer cache", key) - } - if !exists { - return nil, errors.Errorf("Unable to find %q in the informer cache", key) - } - namespaceTypeConfig := cachedObj.(*fedcorev1a1.FederatedTypeConfig) - apiResource := namespaceTypeConfig.GetFederatedType() - return &apiResource, nil -} - -func (c *Controller) reconcileOnNamespaceFTCUpdate() { - for _, cachedObj := range c.ftcStore.List() { - typeConfig := cachedObj.(*fedcorev1a1.FederatedTypeConfig) - if typeConfig.GetNamespaced() && !typeConfig.IsNamespace() { - c.worker.EnqueueObject(typeConfig) - } - } -} - -// pluralName computes the plural name from the kind by lowercasing and suffixing with 's' or `es`. -func SetFederatedTypeConfigDefaults(obj *fedcorev1a1.FederatedTypeConfig) { - nameParts := strings.SplitN(obj.Name, ".", 2) - targetPluralName := nameParts[0] - setStringDefault(&obj.Spec.TargetType.PluralName, targetPluralName) - if len(nameParts) > 1 { - group := nameParts[1] - setStringDefault(&obj.Spec.TargetType.Group, group) - } - setStringDefault(&obj.Spec.FederatedType.PluralName, pluralName(obj.Spec.FederatedType.Kind)) -} - -func pluralName(kind string) string { - lowerKind := strings.ToLower(kind) - if strings.HasSuffix(lowerKind, "s") || strings.HasSuffix(lowerKind, "x") || - strings.HasSuffix(lowerKind, "ch") || strings.HasSuffix(lowerKind, "sh") || - strings.HasSuffix(lowerKind, "z") || strings.HasSuffix(lowerKind, "o") { - return fmt.Sprintf("%ses", lowerKind) - } - if strings.HasSuffix(lowerKind, "y") { - lowerKind = strings.TrimSuffix(lowerKind, "y") - return fmt.Sprintf("%sies", lowerKind) - } - return fmt.Sprintf("%ss", lowerKind) -} - -func setStringDefault(value *string, defaultValue string) { - if value == nil || len(*value) > 0 { - return - } - *value = defaultValue -} From 82c6e7a48a66d06f637d34bb3252c042e38b9f69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Tue, 11 Jul 2023 04:45:20 +0000 Subject: [PATCH 004/173] ci: bump golangci-lint to 1.53.3 --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c495d60d..9dfa8a7e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,7 +42,7 @@ jobs: - uses: golangci/golangci-lint-action@v3 with: - version: v1.52.2 + version: v1.53.3 only-new-issues: true args: > --timeout=5m From 1fa9349c588e7afb58174bedb15ed7c8b7c205d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 7 Jul 2023 05:09:40 +0000 Subject: [PATCH 005/173] refactor(reconcileworker): use workqueue.RateLimitingInterface and allow generic keys --- pkg/controllers/automigration/controller.go | 2 +- pkg/controllers/federate/controller.go | 2 +- .../federatedcluster/controller.go | 7 +- 
pkg/controllers/follower/controller.go | 2 +- pkg/controllers/monitor/monitor_controller.go | 2 +- .../monitor/monitor_subcontroller.go | 2 +- pkg/controllers/nsautoprop/controller.go | 2 +- .../override/overridepolicy_controller.go | 2 +- pkg/controllers/policyrc/controller.go | 6 +- pkg/controllers/scheduler/scheduler.go | 2 +- pkg/controllers/status/controller.go | 2 +- .../statusaggregator/controller.go | 2 +- pkg/controllers/sync/controller.go | 5 +- .../util/worker/{constants.go => result.go} | 0 pkg/controllers/util/worker/worker.go | 181 +++++++++--------- 15 files changed, 108 insertions(+), 111 deletions(-) rename pkg/controllers/util/worker/{constants.go => result.go} (100%) diff --git a/pkg/controllers/automigration/controller.go b/pkg/controllers/automigration/controller.go index 21cee5a5..2a05b4ce 100644 --- a/pkg/controllers/automigration/controller.go +++ b/pkg/controllers/automigration/controller.go @@ -109,7 +109,7 @@ func NewAutoMigrationController( c.worker = worker.NewReconcileWorker( c.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, delayingdeliver.NewMetricTags("auto-migration-worker", c.typeConfig.GetFederatedType().Kind), diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 2b8716ce..f8aa2c09 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -115,7 +115,7 @@ func NewFederateController( c.worker = worker.NewReconcileWorker( c.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, workerCount, metrics, delayingdeliver.NewMetricTags("federate-controller-worker", c.typeConfig.GetFederatedType().Kind), diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index 1c2f1ee5..b216ace2 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -127,7 +127,7 @@ func NewFederatedClusterController( c.worker = worker.NewReconcileWorker( c.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, workerCount, metrics, delayingdeliver.NewMetricTags("federatedcluster-worker", "FederatedCluster"), @@ -135,9 +135,8 @@ func NewFederatedClusterController( c.statusCollectWorker = worker.NewReconcileWorker( c.collectClusterStatus, - worker.WorkerTiming{ - Interval: 50 * time.Millisecond, - InitialBackoff: 50 * time.Millisecond, + worker.RateLimiterOptions{ + InitialDelay: 50 * time.Millisecond, }, workerCount, metrics, diff --git a/pkg/controllers/follower/controller.go b/pkg/controllers/follower/controller.go index 43c9ced2..25d668c3 100644 --- a/pkg/controllers/follower/controller.go +++ b/pkg/controllers/follower/controller.go @@ -174,7 +174,7 @@ func NewFollowerController( func(qualifiedName common.QualifiedName) worker.Result { return reconcile(handles, qualifiedName) }, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, workerCount, c.metrics, delayingdeliver.NewMetricTags("follower-controller-worker", handles.name), diff --git a/pkg/controllers/monitor/monitor_controller.go b/pkg/controllers/monitor/monitor_controller.go index 831252a2..e19d82b2 100644 --- a/pkg/controllers/monitor/monitor_controller.go +++ b/pkg/controllers/monitor/monitor_controller.go @@ -97,7 +97,7 @@ func NewMonitorController(config *util.ControllerConfig) (*MonitorController, er stopChannels: make(map[string]chan struct{}), } - c.worker = worker.NewReconcileWorker(c.reconcile, worker.WorkerTiming{}, 1, 
config.Metrics, + c.worker = worker.NewReconcileWorker(c.reconcile, worker.RateLimiterOptions{}, 1, config.Metrics, delayingdeliver.NewMetricTags("monitor-worker", "")) c.meters = &sync.Map{} diff --git a/pkg/controllers/monitor/monitor_subcontroller.go b/pkg/controllers/monitor/monitor_subcontroller.go index b6ef88c4..8ced928f 100644 --- a/pkg/controllers/monitor/monitor_subcontroller.go +++ b/pkg/controllers/monitor/monitor_subcontroller.go @@ -121,7 +121,7 @@ func newMonitorSubController( return nil, err } - m.worker = worker.NewReconcileWorker(m.reconcile, worker.WorkerTiming{}, controllerConfig.WorkerCount, + m.worker = worker.NewReconcileWorker(m.reconcile, worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, delayingdeliver.NewMetricTags("monitor-subcontroller", m.kind)) m.federatedStore, m.federatedController = util.NewResourceInformer(m.federatedClient, diff --git a/pkg/controllers/nsautoprop/controller.go b/pkg/controllers/nsautoprop/controller.go index 65917cea..830e5c46 100644 --- a/pkg/controllers/nsautoprop/controller.go +++ b/pkg/controllers/nsautoprop/controller.go @@ -154,7 +154,7 @@ func newController( c.worker = worker.NewReconcileWorker( c.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, delayingdeliver.NewMetricTags(userAgent, federatedNamespaceApiResource.Kind), diff --git a/pkg/controllers/override/overridepolicy_controller.go b/pkg/controllers/override/overridepolicy_controller.go index a49bf6c9..b559f813 100644 --- a/pkg/controllers/override/overridepolicy_controller.go +++ b/pkg/controllers/override/overridepolicy_controller.go @@ -134,7 +134,7 @@ func newController( c.worker = worker.NewReconcileWorker( c.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, delayingdeliver.NewMetricTags(c.name, federatedApiResource.Kind), diff --git a/pkg/controllers/policyrc/controller.go b/pkg/controllers/policyrc/controller.go index 7ce1d75e..7bcbffb4 100644 --- a/pkg/controllers/policyrc/controller.go +++ b/pkg/controllers/policyrc/controller.go @@ -104,7 +104,7 @@ func newController(controllerConfig *util.ControllerConfig, c.countWorker = worker.NewReconcileWorker( c.reconcileCount, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, 1, // currently only one worker is meaningful due to the global mutex controllerConfig.Metrics, delayingdeliver.NewMetricTags("policyrc-controller-count-worker", c.typeConfig.GetFederatedType().Kind), @@ -114,7 +114,7 @@ func newController(controllerConfig *util.ControllerConfig, func(qualifiedName common.QualifiedName) worker.Result { return c.reconcilePersist("propagation-policy", qualifiedName, c.pp.store, c.cpp.store, c.ppCounter) }, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, delayingdeliver.NewMetricTags("policyrc-controller-persist-worker", c.typeConfig.GetFederatedType().Kind), @@ -123,7 +123,7 @@ func newController(controllerConfig *util.ControllerConfig, func(qualifiedName common.QualifiedName) worker.Result { return c.reconcilePersist("override-policy", qualifiedName, c.op.store, c.cop.store, c.opCounter) }, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, delayingdeliver.NewMetricTags("policyrc-controller-persist-worker", c.typeConfig.GetFederatedType().Kind), diff --git a/pkg/controllers/scheduler/scheduler.go 
b/pkg/controllers/scheduler/scheduler.go index b533536d..87ea260e 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -127,7 +127,7 @@ func NewScheduler( s.worker = worker.NewReconcileWorker( s.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, workerCount, metrics, delayingdeliver.NewMetricTags("scheduler-worker", s.typeConfig.GetFederatedType().Kind), diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index 51c9c464..4a57c1d6 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -171,7 +171,7 @@ func newStatusController( s.worker = worker.NewReconcileWorker( s.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, delayingdeliver.NewMetricTags("status-worker", typeConfig.GetTargetType().Kind), diff --git a/pkg/controllers/statusaggregator/controller.go b/pkg/controllers/statusaggregator/controller.go index 7ca73485..9f2aaeba 100644 --- a/pkg/controllers/statusaggregator/controller.go +++ b/pkg/controllers/statusaggregator/controller.go @@ -154,7 +154,7 @@ func newStatusAggregator(controllerConfig *util.ControllerConfig, a.worker = worker.NewReconcileWorker( a.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, delayingdeliver.NewMetricTags("statusaggregator-worker", typeConfig.GetTargetType().Kind), diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 99c16d44..ceff480e 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -217,13 +217,13 @@ func newSyncController( s.worker = worker.NewReconcileWorker( s.reconcile, - worker.WorkerTiming{}, + worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, deliverutil.NewMetricTags("sync-worker", typeConfig.GetTargetType().Kind), ) - s.clusterWorker = worker.NewReconcileWorker(s.reconcileCluster, worker.WorkerTiming{}, 1, controllerConfig.Metrics, + s.clusterWorker = worker.NewReconcileWorker(s.reconcileCluster, worker.RateLimiterOptions{}, 1, controllerConfig.Metrics, deliverutil.NewMetricTags("sync-cluster-worker", typeConfig.GetTargetType().Kind)) // Build deliverer for triggering cluster reconciliations. @@ -342,7 +342,6 @@ func (s *SyncController) reconcile(qualifiedName common.QualifiedName) (status w keyedLogger := s.logger.WithValues("object", key) ctx := klog.NewContext(context.TODO(), keyedLogger) fedResource, possibleOrphan, err := s.fedAccessor.FederatedResource(qualifiedName) - if err != nil { keyedLogger.Error(err, "Failed to create FederatedResource helper") return worker.StatusError diff --git a/pkg/controllers/util/worker/constants.go b/pkg/controllers/util/worker/result.go similarity index 100% rename from pkg/controllers/util/worker/constants.go rename to pkg/controllers/util/worker/result.go diff --git a/pkg/controllers/util/worker/worker.go b/pkg/controllers/util/worker/worker.go index 3ca3a892..3b104366 100644 --- a/pkg/controllers/util/worker/worker.go +++ b/pkg/controllers/util/worker/worker.go @@ -21,154 +21,153 @@ are Copyright 2023 The KubeAdmiral Authors. 
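The hunk below rewrites the worker around client-go's rate-limiting workqueue and makes it generic over the queue key. As a reading aid, here is a hypothetical caller of the new API; only the NewReconcileWorker signature, the Result fields, and the RateLimiterOptions fields are taken from this patch, while the package name, key type, key function, reconcile body, and delays are invented for the sketch:

package demo

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker"
	"github.com/kubewharf/kubeadmiral/pkg/stats"
)

// newDemoWorker is an illustrative caller of the reworked generic worker.
func newDemoWorker(metrics stats.Metrics) worker.ReconcileWorker[string] {
	return worker.NewReconcileWorker[string](
		"demo-worker", // name of the underlying rate-limiting workqueue
		// Key function: derive the queue key from an object's metadata.
		func(obj metav1.Object) string {
			return obj.GetNamespace() + "/" + obj.GetName()
		},
		// Reconcile function: the zero Result requests neither backoff nor a
		// delayed requeue, so the key is simply forgotten by the rate limiter.
		func(key string) worker.Result {
			return worker.Result{}
		},
		worker.RateLimiterOptions{
			InitialDelay: 50 * time.Millisecond, // per-item exponential backoff floor
			MaxDelay:     time.Minute,           // per-item exponential backoff cap
		},
		4, // number of parallel reconcile goroutines
		metrics,
	)
}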
package worker import ( + "math" "time" - pkgruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/flowcontrol" + "golang.org/x/time/rate" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - deliverutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" "github.com/kubewharf/kubeadmiral/pkg/stats" ) -type ReconcileFunc func(qualifiedName common.QualifiedName) Result +type ReconcileFunc[Key any] func(Key) Result -type ReconcileWorker interface { - Enqueue(qualifiedName common.QualifiedName) - EnqueueObject(obj pkgruntime.Object) - EnqueueForBackoff(qualifiedName common.QualifiedName) - EnqueueWithDelay(qualifiedName common.QualifiedName, delay time.Duration) +type KeyFunc[Key any] func(metav1.Object) Key + +type ReconcileWorker[Key any] interface { + Enqueue(key Key) + EnqueueObject(obj metav1.Object) + EnqueueWithBackoff(key Key) + EnqueueWithDelay(key Key, delay time.Duration) Run(stopChan <-chan struct{}) } -type WorkerTiming struct { - Interval time.Duration - InitialBackoff time.Duration - MaxBackoff time.Duration +type RateLimiterOptions struct { + // The initial delay for a failed item. + InitialDelay time.Duration + // The maximum delay for a failed item. + MaxDelay time.Duration + // The overall reconcile qps. + OverallQPS float64 + // The overall reconcile burst. + OverallBurst int } -type asyncWorker struct { - reconcile ReconcileFunc - - timing WorkerTiming +type asyncWorker[Key any] struct { + // Name of this reconcile worker. + name string - // For triggering reconciliation of a single resource. This is - // used when there is an add/update/delete operation on a resource - // in either the API of the cluster hosting KubeFed or in the API - // of a member cluster. - deliverer *deliverutil.DelayingDeliverer + // Function to extract queue key from a metav1.Object + keyFunc KeyFunc[Key] - // Work queue allowing parallel processing of resources - queue workqueue.Interface + // Work queue holding keys to be processed. + queue workqueue.RateLimitingInterface - // Backoff manager - backoff *flowcontrol.Backoff + // Function called to reconcile keys popped from the queue. + reconcile ReconcileFunc[Key] + // Number of parallel workers to reconcile keys popped from the queue. workerCount int - metrics stats.Metrics - metricTags deliverutil.MetricTags + // Metrics implementation. + // TODO: export workqueue metrics by providing a MetricsProvider implementation. 
+ metrics stats.Metrics } -func NewReconcileWorker( - reconcile ReconcileFunc, - timing WorkerTiming, +func NewReconcileWorker[Key any]( + name string, + keyFunc KeyFunc[Key], + reconcile ReconcileFunc[Key], + timing RateLimiterOptions, workerCount int, metrics stats.Metrics, - metricTags deliverutil.MetricTags, -) ReconcileWorker { - if timing.Interval == 0 { - timing.Interval = time.Second * 1 +) ReconcileWorker[Key] { + if timing.InitialDelay <= 0 { + timing.InitialDelay = 5 * time.Second } - if timing.InitialBackoff == 0 { - timing.InitialBackoff = time.Second * 5 + if timing.MaxDelay <= 0 { + timing.MaxDelay = time.Minute } - if timing.MaxBackoff == 0 { - timing.MaxBackoff = time.Minute + if timing.OverallQPS <= 0 { + timing.OverallQPS = float64(rate.Inf) } + if timing.OverallBurst <= 0 { + timing.OverallBurst = math.MaxInt + } + + rateLimiter := workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(timing.InitialDelay, timing.MaxDelay), + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(timing.OverallQPS), timing.OverallBurst)}, + ) + queue := workqueue.NewNamedRateLimitingQueue(rateLimiter, name) - if workerCount == 0 { + if workerCount <= 0 { workerCount = 1 } - return &asyncWorker{ + + return &asyncWorker[Key]{ + name: name, + keyFunc: keyFunc, reconcile: reconcile, - timing: timing, - deliverer: deliverutil.NewDelayingDeliverer(), - queue: workqueue.New(), - backoff: flowcontrol.NewBackOff(timing.InitialBackoff, timing.MaxBackoff), + queue: queue, workerCount: workerCount, metrics: metrics, - metricTags: metricTags, } } -func (w *asyncWorker) Enqueue(qualifiedName common.QualifiedName) { - w.deliver(qualifiedName, 0, false) +func (w *asyncWorker[Key]) Enqueue(key Key) { + w.queue.Add(key) } -func (w *asyncWorker) EnqueueObject(obj pkgruntime.Object) { - qualifiedName := common.NewQualifiedName(obj) - w.Enqueue(qualifiedName) +func (w *asyncWorker[Key]) EnqueueObject(obj metav1.Object) { + w.Enqueue(w.keyFunc(obj)) } -func (w *asyncWorker) EnqueueForBackoff(qualifiedName common.QualifiedName) { - w.deliver(qualifiedName, 0, true) +func (w *asyncWorker[Key]) EnqueueWithBackoff(key Key) { + w.queue.AddRateLimited(key) } -func (w *asyncWorker) EnqueueWithDelay(qualifiedName common.QualifiedName, delay time.Duration) { - w.deliver(qualifiedName, delay, false) +func (w *asyncWorker[Key]) EnqueueWithDelay(key Key, delay time.Duration) { + w.queue.AddAfter(key, delay) } -func (w *asyncWorker) Run(stopChan <-chan struct{}) { - util.StartBackoffGC(w.backoff, stopChan) - w.deliverer.StartWithHandler(func(item *deliverutil.DelayingDelivererItem) { - w.queue.Add(item.Key) - }) - go w.deliverer.RunMetricLoop(stopChan, 30*time.Second, w.metrics, w.metricTags) - +func (w *asyncWorker[Key]) Run(stopChan <-chan struct{}) { for i := 0; i < w.workerCount; i++ { - go wait.Until(w.worker, w.timing.Interval, stopChan) + go w.worker() } // Ensure all goroutines are cleaned up when the stop channel closes go func() { <-stopChan w.queue.ShutDown() - w.deliverer.Stop() }() } -// deliver adds backoff to delay if backoff is true. Otherwise, it -// resets backoff. 
-func (w *asyncWorker) deliver(qualifiedName common.QualifiedName, delay time.Duration, backoff bool) { - key := qualifiedName.String() - if backoff { - w.backoff.Next(key, time.Now()) - delay = delay + w.backoff.Get(key) - } else { - w.backoff.Reset(key) +func (w *asyncWorker[Key]) processNextItem() bool { + keyAny, quit := w.queue.Get() + if quit { + return false } - w.deliverer.DeliverAfter(key, &qualifiedName, delay) -} -func (w *asyncWorker) worker() { - for { - obj, quit := w.queue.Get() - if quit { - return - } + key := keyAny.(Key) + result := w.reconcile(key) + w.queue.Done(keyAny) - qualifiedName := common.NewQualifiedFromString(obj.(string)) - result := w.reconcile(qualifiedName) - w.queue.Done(obj) + if result.Backoff { + w.EnqueueWithBackoff(key) + } else { + w.queue.Forget(keyAny) - if result.Backoff { - w.EnqueueForBackoff(qualifiedName) - } else if result.RequeueAfter != nil { - w.EnqueueWithDelay(qualifiedName, *result.RequeueAfter) + if result.RequeueAfter != nil { + w.EnqueueWithDelay(key, *result.RequeueAfter) } } + + return true +} + +func (w *asyncWorker[Key]) worker() { + for w.processNextItem() { + } } From 5917797bd8eb093cbf19dd07eb8c5acb46f6b06b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 7 Jul 2023 05:35:44 +0000 Subject: [PATCH 006/173] refactor(deliver): replace DelayingDeliverer with workqueue.DelayingInterface --- pkg/controllers/status/controller.go | 28 +++++++++-------- .../statusaggregator/controller.go | 29 ++++++++++-------- pkg/controllers/sync/controller.go | 30 +++++++++++-------- 3 files changed, 49 insertions(+), 38 deletions(-) diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index 4a57c1d6..3821ec7a 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -42,6 +42,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" @@ -58,7 +59,6 @@ import ( const ( StatusControllerName = "status-controller" - allClustersKey = "ALL_CLUSTERS" ) const ( @@ -71,7 +71,7 @@ type StatusController struct { // For triggering reconciliation of all target resources. This is // used when a new cluster becomes available. - clusterDeliverer *delayingdeliver.DelayingDeliverer + clusterQueue workqueue.DelayingInterface // Informer for resources in member clusters informer util.FederatedInformer @@ -177,8 +177,8 @@ func newStatusController( delayingdeliver.NewMetricTags("status-worker", typeConfig.GetTargetType().Kind), ) - // Build deliverer for triggering cluster reconciliations. - s.clusterDeliverer = delayingdeliver.NewDelayingDeliverer() + // Build queue for triggering cluster reconciliations. + s.clusterQueue = workqueue.NewNamedDelayingQueue("status-controller-cluster-queue") // Start informers on the resources for the federated type enqueueObj := s.worker.EnqueueObject @@ -214,11 +214,11 @@ func newStatusController( &util.ClusterLifecycleHandlerFuncs{ ClusterAvailable: func(cluster *fedcorev1a1.FederatedCluster) { // When new cluster becomes available process all the target resources again. - s.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(s.clusterAvailableDelay)) + s.clusterQueue.AddAfter(struct{}{}, s.clusterAvailableDelay) }, // When a cluster becomes unavailable process all the target resources again. 
ClusterUnavailable: func(cluster *fedcorev1a1.FederatedCluster, _ []interface{}) { - s.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(s.clusterUnavailableDelay)) + s.clusterQueue.AddAfter(struct{}{}, s.clusterUnavailableDelay) }, }, ) @@ -239,14 +239,18 @@ func (s *StatusController) minimizeLatency() { // Run runs the status controller func (s *StatusController) Run(stopChan <-chan struct{}) { - go s.clusterDeliverer.RunMetricLoop(stopChan, 30*time.Second, s.metrics, - delayingdeliver.NewMetricTags("status-clusterDeliverer", s.typeConfig.GetTargetType().Kind)) go s.federatedController.Run(stopChan) go s.statusController.Run(stopChan) s.informer.Start() - s.clusterDeliverer.StartWithHandler(func(_ *delayingdeliver.DelayingDelivererItem) { - s.reconcileOnClusterChange() - }) + go func() { + for { + _, shutdown := s.clusterQueue.Get() + if shutdown { + break + } + s.reconcileOnClusterChange() + } + }() if !cache.WaitForNamedCacheSync(s.name, stopChan, s.HasSynced) { return @@ -258,7 +262,7 @@ func (s *StatusController) Run(stopChan <-chan struct{}) { go func() { <-stopChan s.informer.Stop() - s.clusterDeliverer.Stop() + s.clusterQueue.ShutDown() }() } diff --git a/pkg/controllers/statusaggregator/controller.go b/pkg/controllers/statusaggregator/controller.go index 9f2aaeba..4484e6d7 100644 --- a/pkg/controllers/statusaggregator/controller.go +++ b/pkg/controllers/statusaggregator/controller.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" @@ -51,8 +52,6 @@ import ( const ( ControllerName = "status-aggregator-controller" - allClustersKey = "ALL_CLUSTERS" - EventReasonUpdateSourceObjectStatus = "UpdateSourceObjectStatus" EventReasonUpdateSourceObjectAnnotation = "UpdateSourceObjectAnnotation" ) @@ -80,7 +79,7 @@ type StatusAggregator struct { informer util.FederatedInformer // For triggering reconciliation of all target resources. This is // used when a new cluster becomes available. - clusterDeliverer *delayingdeliver.DelayingDeliverer + clusterQueue workqueue.DelayingInterface clusterAvailableDelay time.Duration clusterUnavailableDelay time.Duration objectEnqueueDelay time.Duration @@ -146,8 +145,8 @@ func newStatusAggregator(controllerConfig *util.ControllerConfig, return nil, err } - // Build deliverer for triggering cluster reconciliations. - a.clusterDeliverer = delayingdeliver.NewDelayingDeliverer() + // Build queue for triggering cluster reconciliations. + a.clusterQueue = workqueue.NewNamedDelayingQueue("status-aggregator-cluster-queue") a.clusterAvailableDelay = controllerConfig.ClusterAvailableDelay a.clusterUnavailableDelay = controllerConfig.ClusterUnavailableDelay a.objectEnqueueDelay = 10 * time.Second @@ -185,11 +184,11 @@ func newStatusAggregator(controllerConfig *util.ControllerConfig, &util.ClusterLifecycleHandlerFuncs{ ClusterAvailable: func(cluster *fedcorev1a1.FederatedCluster) { // When new cluster becomes available process all the target resources again. - a.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(a.clusterAvailableDelay)) + a.clusterQueue.AddAfter(struct{}{}, a.clusterAvailableDelay) }, // When a cluster becomes unavailable process all the target resources again. 
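These hunks all apply the same idiom: the delaying queue carries no payload, only empty struct{} tokens that mean "reconcile all target resources after this delay", and a single goroutine drains it. A self-contained sketch of the idiom, using only client-go (the queue name, delays, and print statement are invented); it additionally calls Done after each Get so that a token re-added during processing is delivered again:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// The queue carries no payload: every item is the same struct{}{} token,
	// so pending signals are naturally deduplicated by the workqueue.
	clusterQueue := workqueue.NewNamedDelayingQueue("demo-cluster-queue")

	go func() {
		for {
			item, shutdown := clusterQueue.Get()
			if shutdown {
				return
			}
			// Stand-in for reconcileOnClusterChange() in the controllers above.
			fmt.Println("reconciling all target resources")
			// Done releases the token so that a signal enqueued while we were
			// processing is delivered by a subsequent Get.
			clusterQueue.Done(item)
		}
	}()

	// Equivalent of clusterQueue.AddAfter(struct{}{}, clusterAvailableDelay).
	clusterQueue.AddAfter(struct{}{}, 100*time.Millisecond)

	time.Sleep(300 * time.Millisecond)
	clusterQueue.ShutDown()
}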
ClusterUnavailable: func(cluster *fedcorev1a1.FederatedCluster, _ []interface{}) { - a.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(a.clusterUnavailableDelay)) + a.clusterQueue.AddAfter(struct{}{}, a.clusterUnavailableDelay) }, }, ) @@ -206,11 +205,15 @@ func (a *StatusAggregator) Run(stopChan <-chan struct{}) { go a.sourceController.Run(stopChan) go a.federatedController.Run(stopChan) a.informer.Start() - a.clusterDeliverer.StartWithHandler(func(_ *delayingdeliver.DelayingDelivererItem) { - a.reconcileOnClusterChange() - }) - go a.clusterDeliverer.RunMetricLoop(stopChan, 30*time.Second, a.metrics, - delayingdeliver.NewMetricTags("schedulingpreference-clusterDeliverer", a.typeConfig.GetTargetType().Kind)) + go func() { + for { + _, shutdown := a.clusterQueue.Get() + if shutdown { + break + } + a.reconcileOnClusterChange() + } + }() if !cache.WaitForNamedCacheSync(a.name, stopChan, a.HasSynced) { return } @@ -225,7 +228,7 @@ func (a *StatusAggregator) Run(stopChan <-chan struct{}) { }() <-stopChan a.informer.Stop() - a.clusterDeliverer.Stop() + a.clusterQueue.ShutDown() }() } diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index ceff480e..2c81b6b8 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -43,6 +43,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" + "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -54,7 +55,6 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/status" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" - deliverutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" finalizersutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/history" @@ -67,7 +67,6 @@ import ( ) const ( - allClustersKey = "ALL_CLUSTERS" EventReasonWaitForCascadingDelete = "WaitForCascadingDelete" EventReasonWaitForCascadingDeleteError = "WaitForCascadingDeleteError" SyncControllerName = "sync-controller" @@ -95,7 +94,7 @@ type SyncController struct { // For triggering reconciliation of all target resources. This is // used when a new cluster becomes available. - clusterDeliverer *deliverutil.DelayingDeliverer + clusterQueue workqueue.DelayingInterface // Informer for resources in member clusters informer util.FederatedInformer @@ -223,11 +222,12 @@ func newSyncController( deliverutil.NewMetricTags("sync-worker", typeConfig.GetTargetType().Kind), ) + // TODO: do we need both clusterWorker and clusterQueue? s.clusterWorker = worker.NewReconcileWorker(s.reconcileCluster, worker.RateLimiterOptions{}, 1, controllerConfig.Metrics, deliverutil.NewMetricTags("sync-cluster-worker", typeConfig.GetTargetType().Kind)) - // Build deliverer for triggering cluster reconciliations. - s.clusterDeliverer = deliverutil.NewDelayingDeliverer() + // Build queue for triggering cluster reconciliations. + s.clusterQueue = workqueue.NewNamedDelayingQueue("sync-controller-cluster-queue") targetAPIResource := typeConfig.GetTargetType() @@ -245,12 +245,12 @@ func newSyncController( ClusterAvailable: func(cluster *fedcorev1a1.FederatedCluster) { // When new cluster becomes available process all the target resources again. 
s.clusterWorker.EnqueueObject(cluster) - s.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(s.clusterAvailableDelay)) + s.clusterQueue.AddAfter(struct{}{}, s.clusterAvailableDelay) }, // When a cluster becomes unavailable process all the target resources again. ClusterUnavailable: func(cluster *fedcorev1a1.FederatedCluster, _ []interface{}) { s.clusterWorker.EnqueueObject(cluster) - s.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(s.clusterUnavailableDelay)) + s.clusterQueue.AddAfter(struct{}{}, s.clusterUnavailableDelay) }, }, ) @@ -287,11 +287,15 @@ func (s *SyncController) minimizeLatency() { func (s *SyncController) Run(stopChan <-chan struct{}) { s.fedAccessor.Run(stopChan) s.informer.Start() - s.clusterDeliverer.StartWithHandler(func(_ *deliverutil.DelayingDelivererItem) { - s.reconcileOnClusterChange() - }) - go s.clusterDeliverer.RunMetricLoop(stopChan, 30*time.Second, s.metrics, - deliverutil.NewMetricTags("sync-clusterDeliverer", s.typeConfig.GetTargetType().Kind)) + go func() { + for { + _, shutdown := s.clusterQueue.Get() + if shutdown { + break + } + s.reconcileOnClusterChange() + } + }() if !cache.WaitForNamedCacheSync(s.name, stopChan, s.HasSynced) { return @@ -304,7 +308,7 @@ func (s *SyncController) Run(stopChan <-chan struct{}) { go func() { <-stopChan s.informer.Stop() - s.clusterDeliverer.Stop() + s.clusterQueue.ShutDown() }() } From 48d241eb3d6ee2e5f57f9c401df771a5caf1ebb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 7 Jul 2023 05:39:37 +0000 Subject: [PATCH 007/173] refactor(delayingdeliver): remove delayingdeliver and unbounded queue --- pkg/controllers/util/backoff.go | 40 --- .../delayingdeliver/delaying_deliverer.go | 223 ---------------- .../delaying_deliverer_test.go | 67 ----- .../util/unboundedqueue/unbounded_queue.go | 241 ------------------ .../unboundedqueue/unbounded_queue_test.go | 57 ----- 5 files changed, 628 deletions(-) delete mode 100644 pkg/controllers/util/backoff.go delete mode 100644 pkg/controllers/util/delayingdeliver/delaying_deliverer.go delete mode 100644 pkg/controllers/util/delayingdeliver/delaying_deliverer_test.go delete mode 100644 pkg/controllers/util/unboundedqueue/unbounded_queue.go delete mode 100644 pkg/controllers/util/unboundedqueue/unbounded_queue_test.go diff --git a/pkg/controllers/util/backoff.go b/pkg/controllers/util/backoff.go deleted file mode 100644 index ca235816..00000000 --- a/pkg/controllers/util/backoff.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. 
-*/ - -package util - -import ( - "time" - - "k8s.io/client-go/util/flowcontrol" -) - -func StartBackoffGC(backoff *flowcontrol.Backoff, stopCh <-chan struct{}) { - go func() { - for { - select { - case <-time.After(time.Minute): - backoff.GC() - case <-stopCh: - return - } - } - }() -} diff --git a/pkg/controllers/util/delayingdeliver/delaying_deliverer.go b/pkg/controllers/util/delayingdeliver/delaying_deliverer.go deleted file mode 100644 index 7b6cd657..00000000 --- a/pkg/controllers/util/delayingdeliver/delaying_deliverer.go +++ /dev/null @@ -1,223 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. -*/ - -package delayingdeliver - -import ( - "container/heap" - "time" - - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unboundedqueue" - "github.com/kubewharf/kubeadmiral/pkg/stats" -) - -const ( - // TODO: Investigate what capacity is right. - delayingDelivererUpdateChanCapacity = 10000 -) - -// DelayingDelivererItem is structure delivered by DelayingDeliverer to the -// target channel. -type DelayingDelivererItem struct { - // Key under which the value was added to deliverer. - Key string - // Value of the item. - Value interface{} - // When the item should be delivered. - DeliveryTime time.Time -} - -type delivererHeap struct { - keyPosition map[string]int - data []*DelayingDelivererItem -} - -// Functions required by container.Heap. - -func (dh *delivererHeap) Len() int { return len(dh.data) } - -func (dh *delivererHeap) Less(i, j int) bool { - return dh.data[i].DeliveryTime.Before(dh.data[j].DeliveryTime) -} - -func (dh *delivererHeap) Swap(i, j int) { - dh.keyPosition[dh.data[i].Key] = j - dh.keyPosition[dh.data[j].Key] = i - dh.data[i], dh.data[j] = dh.data[j], dh.data[i] -} - -func (dh *delivererHeap) Push(x interface{}) { - item := x.(*DelayingDelivererItem) - dh.data = append(dh.data, item) - dh.keyPosition[item.Key] = len(dh.data) - 1 -} - -func (dh *delivererHeap) Pop() interface{} { - n := len(dh.data) - item := dh.data[n-1] - dh.data = dh.data[:n-1] - delete(dh.keyPosition, item.Key) - return item -} - -// A structure that pushes the items to the target channel at a given time. -type DelayingDeliverer struct { - // Channel to deliver the data when their time comes. - targetChannel chan *DelayingDelivererItem - // Store for data - heap *delivererHeap - // Channel to feed the main goroutine with updates. - updateQueue *unboundedqueue.UnboundedQueue - // To stop the main goroutine. 
- stopChannel chan struct{} -} - -func NewDelayingDeliverer() *DelayingDeliverer { - return NewDelayingDelivererWithChannel(make(chan *DelayingDelivererItem, 100)) -} - -func NewDelayingDelivererWithChannel(targetChannel chan *DelayingDelivererItem) *DelayingDeliverer { - return &DelayingDeliverer{ - targetChannel: targetChannel, - heap: &delivererHeap{ - keyPosition: make(map[string]int), - data: make([]*DelayingDelivererItem, 0), - }, - updateQueue: unboundedqueue.New(delayingDelivererUpdateChanCapacity), - stopChannel: make(chan struct{}), - } -} - -type MetricTags struct { - controller string - kind string -} - -func NewMetricTags(controller string, kind string) MetricTags { - if kind == "" { - kind = "nil" - } - - return MetricTags{controller, kind} -} - -func (d *DelayingDeliverer) RunMetricLoop( - stopCh <-chan struct{}, - interval time.Duration, - metrics stats.Metrics, - metricTags MetricTags, -) { - tags := []stats.Tag{ - {Name: "controller", Value: metricTags.controller}, - {Name: "kind", Value: metricTags.kind}, - } - - d.updateQueue.RunMetricLoop(stopCh, interval, func(md unboundedqueue.MetricData) { - metrics.Store("delayingDeliverer.queueLength", md.MaxLength, tags...) - metrics.Store("delayingDeliverer.queueCapacity", md.Capacity, tags...) - }) -} - -// Deliver all items due before or equal to timestamp. -func (d *DelayingDeliverer) deliver(timestamp time.Time) { - for d.heap.Len() > 0 { - if timestamp.Before(d.heap.data[0].DeliveryTime) { - return - } - item := heap.Pop(d.heap).(*DelayingDelivererItem) - d.targetChannel <- item - } -} - -func (d *DelayingDeliverer) run() { - for { - now := time.Now() - d.deliver(now) - - nextWakeUp := now.Add(time.Hour) - if d.heap.Len() > 0 { - nextWakeUp = d.heap.data[0].DeliveryTime - } - sleepTime := nextWakeUp.Sub(now) - - select { - case <-time.After(sleepTime): - break // just wake up and process the data - case anyItem := <-d.updateQueue.Receiver(): - item := anyItem.(*DelayingDelivererItem) - if position, found := d.heap.keyPosition[item.Key]; found { - if item.DeliveryTime.Before(d.heap.data[position].DeliveryTime) { - d.heap.data[position] = item - heap.Fix(d.heap, position) - } - // Ignore if later. - } else { - heap.Push(d.heap, item) - } - case <-d.stopChannel: - d.updateQueue.Close() - return - } - } -} - -// Starts the DelayingDeliverer. -func (d *DelayingDeliverer) Start() { - go d.run() -} - -// Stops the DelayingDeliverer. Undelivered items are discarded. -func (d *DelayingDeliverer) Stop() { - close(d.stopChannel) -} - -// Delivers value at the given time. -func (d *DelayingDeliverer) DeliverAt(key string, value interface{}, deliveryTime time.Time) { - d.updateQueue.Send(&DelayingDelivererItem{ - Key: key, - Value: value, - DeliveryTime: deliveryTime, - }) -} - -// Delivers value after the given delay. -func (d *DelayingDeliverer) DeliverAfter(key string, value interface{}, delay time.Duration) { - d.DeliverAt(key, value, time.Now().Add(delay)) -} - -// Gets target channel of the deliverer. -func (d *DelayingDeliverer) GetTargetChannel() chan *DelayingDelivererItem { - return d.targetChannel -} - -// Starts Delaying deliverer with a handler listening on the target channel. 
-func (d *DelayingDeliverer) StartWithHandler(handler func(*DelayingDelivererItem)) { - go func() { - for { - select { - case item := <-d.targetChannel: - handler(item) - case <-d.stopChannel: - return - } - } - }() - d.Start() -} diff --git a/pkg/controllers/util/delayingdeliver/delaying_deliverer_test.go b/pkg/controllers/util/delayingdeliver/delaying_deliverer_test.go deleted file mode 100644 index c189b3b4..00000000 --- a/pkg/controllers/util/delayingdeliver/delaying_deliverer_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. -*/ - -package delayingdeliver - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestDelayingDeliverer(t *testing.T) { - targetChannel := make(chan *DelayingDelivererItem) - now := time.Now() - d := NewDelayingDelivererWithChannel(targetChannel) - d.Start() - defer d.Stop() - startupDelay := time.Second - d.DeliverAt("a", "aaa", now.Add(startupDelay+2*time.Millisecond)) - d.DeliverAt("b", "bbb", now.Add(startupDelay+3*time.Millisecond)) - d.DeliverAt("c", "ccc", now.Add(startupDelay+1*time.Millisecond)) - d.DeliverAt("e", "eee", now.Add(time.Hour)) - d.DeliverAt("e", "eee", now) - - d.DeliverAt("d", "ddd", now.Add(time.Hour)) - - i0 := <-targetChannel - assert.Equal(t, "e", i0.Key) - assert.Equal(t, "eee", i0.Value.(string)) - assert.Equal(t, now, i0.DeliveryTime) - - i1 := <-targetChannel - received1 := time.Now() - assert.True(t, received1.Sub(now).Nanoseconds() > startupDelay.Nanoseconds()) - assert.Equal(t, "c", i1.Key) - - i2 := <-targetChannel - assert.Equal(t, "a", i2.Key) - - i3 := <-targetChannel - assert.Equal(t, "b", i3.Key) - - select { - case <-targetChannel: - t.Fatalf("Nothing should be received") - case <-time.After(time.Second): - // Ok. Expected - } -} diff --git a/pkg/controllers/util/unboundedqueue/unbounded_queue.go b/pkg/controllers/util/unboundedqueue/unbounded_queue.go deleted file mode 100644 index 3bbdf6a5..00000000 --- a/pkg/controllers/util/unboundedqueue/unbounded_queue.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unboundedqueue - -import ( - "sync" - "time" - - "k8s.io/apimachinery/pkg/util/runtime" -) - -// UnboundedQueue is an unbounded channel. 
-type UnboundedQueue struct { - deque *deque - notifier chan<- struct{} - receiver <-chan interface{} - stopCh chan<- struct{} -} - -// Creates a new UnboundedQueue with the specified initial capacity. -func New(initialCapacity int) *UnboundedQueue { - deque := newDeque(initialCapacity) - notifier := make(chan struct{}, 1) - receiver := make(chan interface{}) - stopCh := make(chan struct{}, 1) - - go receiverLoop(deque, notifier, receiver, stopCh) - - return &UnboundedQueue{ - deque: deque, - notifier: notifier, - receiver: receiver, - stopCh: stopCh, - } -} - -// Receiver returns the channel that can be used for receiving from this UnboundedQueue. -func (uq *UnboundedQueue) Receiver() <-chan interface{} { - return uq.receiver -} - -type MetricData struct { - // Length is the current length of the queue. - Length int - // MaxLength is the maximum length of the queue since the last sample. - MaxLength int - // Capacity is the current capacity of the queue. - Capacity int -} - -func (uq *UnboundedQueue) RunMetricLoop(stopCh <-chan struct{}, interval time.Duration, sender func(MetricData)) { - defer runtime.HandleCrash() - - for { - select { - case <-stopCh: - return - case <-time.After(interval): - metricData := uq.deque.getMetricData() - sender(metricData) - } - } -} - -func (uq *UnboundedQueue) Close() { - uq.stopCh <- struct{}{} -} - -// Sends an item to the queue. -// -// Since the channel capacity is unbounded, send operations always succeed and never block. -func (uq *UnboundedQueue) Send(obj interface{}) { - uq.deque.pushBack(obj) - - select { - case uq.notifier <- struct{}{}: - // notified the receiver goroutine - default: - // no goroutine is receiving, but notifier is already nonempty anyway - } -} - -func receiverLoop(deque *deque, notifier <-chan struct{}, receiver chan<- interface{}, stopCh <-chan struct{}) { - defer runtime.HandleCrash() - - for { - item := deque.popFront() - - if item != nil { - select { - case <-stopCh: - // queue stopped, close the receiver - close(receiver) - return - case receiver <- item: - // since notifier only has one buffer signal, there may be multiple actual items. - // therefore, we call `popFront` again without waiting for the notifier. - continue - } - } - - // item is nil, we have to wait - // since there is one buffer signal in notifier, - // even if a new item is sent on this line, - // notifier will still receive something. - - select { - case <-stopCh: - // queue stopped, close the receiver - close(receiver) - return - case <-notifier: - // deque has been updated - } - } -} - -// deque is a typical double-ended queue implemented through a ring buffer. -// -// All operations on deque locks on its own mutex, so all operations are concurrency-safe. -type deque struct { - data []interface{} - start int - end int - maxLength int - lock sync.Mutex -} - -// newDeque constructs a new deque with the specified initial capacity. -// -// deque capacity is doubled when the length reaches the capacity, -// i.e. a deque can never be full. -func newDeque(initialCapacity int) *deque { - return &deque{ - data: make([]interface{}, initialCapacity, initialCapacity), - start: 0, - end: 0, - } -} - -// pushBack pushes an object to the end of the queue. -// -// This method has amortized O(1) time complexity and expands the capacity on demand. 
-func (q *deque) pushBack(obj interface{}) { - q.lock.Lock() - defer q.lock.Unlock() - - q.data[q.end] = obj - q.end = (q.end + 1) % len(q.data) - - if q.end == q.start { - // when deque is unlocked, q.end == q.start implies empty deque. - // therefore, we need to expand it now. - - newData := make([]interface{}, len(q.data)*2, len(q.data)*2) - - for i := q.start; i < len(q.data); i++ { - newData[i-q.start] = q.data[i] - } - - leftOffset := len(q.data) - q.start - for i := 0; i < q.end; i++ { - newData[leftOffset+i] = q.data[i] - } - - q.start = 0 - q.end = len(q.data) - - q.data = newData - } - - length := q.lockedGetLength() - if q.maxLength < length { - q.maxLength = length - } -} - -// popFront pops an object from the start of the queue. -// -// This method has O(1) time complexity. -func (q *deque) popFront() interface{} { - q.lock.Lock() - defer q.lock.Unlock() - - if q.start == q.end { - // we assume the deque is in sound state, - // i.e. q.start == q.end implies empty queue. - return nil - } - - ret := q.data[q.start] - - // we need to unset this pointer to allow GC - q.data[q.start] = nil - - q.start = (q.start + 1) % len(q.data) - - return ret -} - -func (q *deque) getMetricData() MetricData { - q.lock.Lock() - defer q.lock.Unlock() - - length := q.lockedGetLength() - - maxLength := q.maxLength - q.maxLength = 0 - - capacity := len(q.data) - - return MetricData{ - Length: length, - MaxLength: maxLength, - Capacity: capacity, - } -} - -func (q *deque) lockedGetLength() int { - if q.start <= q.end { - return q.end - q.start - } - - front := len(q.data) - q.start - back := q.end - return front + back -} diff --git a/pkg/controllers/util/unboundedqueue/unbounded_queue_test.go b/pkg/controllers/util/unboundedqueue/unbounded_queue_test.go deleted file mode 100644 index c46d8b0e..00000000 --- a/pkg/controllers/util/unboundedqueue/unbounded_queue_test.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-package unboundedqueue_test
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/kubewharf/kubeadmiral/pkg/controllers/util/unboundedqueue"
-)
-func TestOverflow(t *testing.T) {
-	assert := assert.New(t)
-
-	uq := unboundedqueue.New(3)
-	recv := []interface{}{}
-
-	uq.Send(int(1))
-	recv = append(recv, <-uq.Receiver())
-
-	uq.Send(int(2))
-	uq.Send(int(3))
-	uq.Send(int(4))
-	recv = append(recv, <-uq.Receiver())
-	recv = append(recv, <-uq.Receiver())
-	recv = append(recv, <-uq.Receiver())
-
-	uq.Send(int(5))
-	uq.Send(int(6))
-	uq.Send(int(7))
-	uq.Send(int(8))
-	uq.Send(int(9))
-	recv = append(recv, <-uq.Receiver())
-	recv = append(recv, <-uq.Receiver())
-	recv = append(recv, <-uq.Receiver())
-	recv = append(recv, <-uq.Receiver())
-	recv = append(recv, <-uq.Receiver())
-
-	for i, v := range recv {
-		assert.Equal(i, v.(int)-1)
-	}
-}

From 8855579fa3471b5f059e3d5a02b38e762e3eb521 Mon Sep 17 00:00:00 2001
From: "hawjia.lim"
Date: Wed, 5 Jul 2023 19:35:45 +0800
Subject: [PATCH 008/173] feat: InformerManager and FederatedInformerManager

---
 pkg/util/informermanager/interface.go | 89 +++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)
 create mode 100644 pkg/util/informermanager/interface.go

diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go
new file mode 100644
index 00000000..52bf5d4d
--- /dev/null
+++ b/pkg/util/informermanager/interface.go
@@ -0,0 +1,89 @@
+package informermanager
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/tools/cache"
+
+	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
+	fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1"
+)
+
+// EventHandlerGenerator is used by InformerManager and FederatedInformerManager to generate and register
+// ResourceEventHandlers for each FTC's source type informer.
+type EventHandlerGenerator struct {
+	// Predicate is called for each FTC add or update event. Predicate should return true only if a new
+	// ResourceEventHandler should be generated and registered for the given FTC. Previous event handlers registered for
+	// this EventHandlerGenerator will also be removed.
+	//
+	// Note: we should be cautious about registering a new ResourceEventHandler as it will receive synthetic add events
+	// for every object in the informer's cache.
+	Predicate func(oldFTC, newFTC *fedcorev1a1.FederatedTypeConfig) bool
+	// Generator is used to generate a ResourceEventHandler for the given FTC. If nil is returned, no
+	// ResourceEventHandler will be registered.
+	Generator func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler
+}
+
+// InformerManager provides an interface for controllers that need to dynamically register event handlers and access
+// objects based on FederatedTypeConfigs. InformerManager will listen to FTC events and maintain informers for the
+// source type of each FTC.
+type InformerManager interface {
+	// Adds an EventHandlerGenerator used to generate and register ResourceEventHandlers for each FTC's source type informer.
+	AddEventHandlerGenerator(generator EventHandlerGenerator) error
+	// Returns a lister for the given GroupVersionResource if it exists. The lister for each FTC's source type will
+	// eventually exist.
+	GetResourceLister(gvr schema.GroupVersionResource) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool)
+
+	// Returns the FederatedTypeConfig lister used by the InformerManager.
+	GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister
+	// Returns true if the InformerManager's view of FederatedTypeConfigs is synced.
+	HasSynced() bool
+
+	// Starts processing FederatedTypeConfig events.
+	Start(ctx context.Context)
+}
+
+// ClusterEventHandler can be registered by controllers to hook into the cluster events received by the
+// FederatedInformerManager.
+type ClusterEventHandler struct {
+	// ClusterEventPredicate is called for each FederatedCluster event and determines if the callback of this
+	// ClusterEventHandler should be called for the given event.
+	Predicate ClusterEventPredicate
+	// Callback is a function that accepts a FederatedCluster object.
+	Callback func(cluster *fedcorev1a1.FederatedCluster)
+}
+
+// ClusterEventPredicate determines if a callback should be called for a given cluster event.
+type ClusterEventPredicate func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool
+
+// FederatedInformerManager provides an interface for controllers that need to dynamically register event handlers and
+// access objects in member clusters based on FederatedTypeConfigs. FederatedInformerManager will listen to FTC events
+// and maintain informers for each FTC's source type and joined member cluster.
+type FederatedInformerManager interface {
+	// Adds an EventHandlerGenerator used to generate and register ResourceEventHandlers for each FTC's source type informer.
+	AddEventHandlerGenerator(generator EventHandlerGenerator) error
+	// Returns a lister for the given GroupVersionResource and cluster if it exists. The lister for each FTC's source
+	// type and cluster will eventually exist.
+	GetResourceLister(
+		gvr schema.GroupVersionResource,
+		cluster string,
+	) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool)
+	// Returns a client for the given cluster if it exists. The client for each cluster will eventually exist.
+	GetClusterClient(cluster string) (dynamic.Interface, bool)
+
+	// Returns the FederatedTypeConfig lister used by the FederatedInformerManager.
+	GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister
+	// Returns the FederatedCluster lister used by the FederatedInformerManager.
+	GetFederatedClusterLister() fedcorev1a1listers.FederatedClusterLister
+	// Returns true if the FederatedInformerManager's view of FederatedTypeConfigs and FederatedClusters is synced.
+	HasSynced() bool
+
+	// Adds a ClusterEventHandler that can be used by controllers to hook into the cluster events received by the
+	// FederatedInformerManager.
+	AddClusterEventHandler(handler ClusterEventHandler) error
+
+	// Starts processing FederatedTypeConfig and FederatedCluster events.
+ Start(ctx context.Context) +} From 29139757758e55ce023b3451959f27115255d6e6 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 6 Jul 2023 18:33:15 +0800 Subject: [PATCH 009/173] implement informermanager and bootstrap unit tests --- pkg/controllers/common/constants.go | 18 + .../federatedinformermanager.go | 53 ++ pkg/util/informermanager/informermanager.go | 256 ++++++ .../informermanager/informermanager_test.go | 775 ++++++++++++++++++ pkg/util/informermanager/interface.go | 30 +- 5 files changed, 1121 insertions(+), 11 deletions(-) create mode 100644 pkg/util/informermanager/federatedinformermanager.go create mode 100644 pkg/util/informermanager/informermanager.go create mode 100644 pkg/util/informermanager/informermanager_test.go diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index b278b853..810817e5 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -166,6 +166,24 @@ var DeploymentGVR = schema.GroupVersionResource{ Resource: "deployments", } +var ConfigMapGVR = schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "configmaps", +} + +var SecretGVR = schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "secrets", +} + +var DaemonSetGVR = schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "daemonsets", +} + // MaxFederatedObjectNameLength defines the max length of a federated object name. // A custom resource name must be a DNS subdomain as defined in RFC1123 with a maximum length of 253. // For more information about the custom resource validator, please refer to diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go new file mode 100644 index 00000000..a19d2de4 --- /dev/null +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -0,0 +1,53 @@ +package informermanager + +import ( + "context" + "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/cache" +) + +type federatedInformerManager struct{} + +// AddClusterEventHandler implements FederatedInformerManager. +func (*federatedInformerManager) AddClusterEventHandler(handler ClusterEventHandler) error { + panic("unimplemented") +} + +// AddEventHandlerGenerator implements FederatedInformerManager. +func (*federatedInformerManager) AddEventHandlerGenerator(generator *EventHandlerGenerator) error { + panic("unimplemented") +} + +// GetClusterClient implements FederatedInformerManager. +func (*federatedInformerManager) GetClusterClient(cluster string) (dynamic.Interface, bool) { + panic("unimplemented") +} + +// GetFederatedClusterLister implements FederatedInformerManager. +func (*federatedInformerManager) GetFederatedClusterLister() v1alpha1.FederatedClusterLister { + panic("unimplemented") +} + +// GetFederatedTypeConfigLister implements FederatedInformerManager. +func (*federatedInformerManager) GetFederatedTypeConfigLister() v1alpha1.FederatedTypeConfigLister { + panic("unimplemented") +} + +// GetResourceLister implements FederatedInformerManager. +func (*federatedInformerManager) GetResourceLister(gvr schema.GroupVersionResource, cluster string) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) { + panic("unimplemented") +} + +// HasSynced implements FederatedInformerManager. 
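For context, here is a hypothetical consumer of the InformerManager implementation introduced by this commit (informermanager.go below). The constructor, the pointer-typed EventHandlerGenerator argument, and the single-argument Predicate match how processFTC invokes it after this commit's revision of interface.go; the FTC name, handler body, and wrapper function are invented for the sketch:

package demo

import (
	"context"

	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/cache"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1"
	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
	"github.com/kubewharf/kubeadmiral/pkg/util/informermanager"
)

// runInformerManager is an illustrative wiring of an EventHandlerGenerator.
func runInformerManager(
	ctx context.Context,
	client dynamic.Interface,
	ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer,
) {
	manager := informermanager.NewInformerManager(client, ftcInformer)

	// Predicate gates whether a handler is (still) registered for an FTC;
	// Generator produces the handler added to that FTC's source-type informer.
	// AddEventHandlerGenerator returns an error once the manager has started.
	_ = manager.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{
		Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool {
			return ftc.Name == "deployments.apps" // illustrative filter
		},
		Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler {
			return cache.ResourceEventHandlerFuncs{
				AddFunc: func(obj interface{}) { /* enqueue obj for reconciliation */ },
			}
		},
	})

	manager.Start(ctx)

	// Listers appear only after the corresponding FTC has been observed.
	if lister, hasSynced, exists := manager.GetResourceLister(common.DeploymentGVR); exists && hasSynced() {
		_ = lister // e.g. lister.List(labels.Everything())
	}
}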
+func (*federatedInformerManager) HasSynced() bool { + panic("unimplemented") +} + +// Start implements FederatedInformerManager. +func (*federatedInformerManager) Start(ctx context.Context) { + panic("unimplemented") +} + +var _ FederatedInformerManager = &federatedInformerManager{} diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go new file mode 100644 index 00000000..e07d9e36 --- /dev/null +++ b/pkg/util/informermanager/informermanager.go @@ -0,0 +1,256 @@ +package informermanager + +import ( + "context" + "fmt" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" +) + +type informerManager struct { + lock sync.RWMutex + + started bool + + client dynamic.Interface + ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer + + eventHandlerGenerators []*EventHandlerGenerator + + gvrMapping map[schema.GroupVersionResource]string + + informers map[string]informers.GenericInformer + informerStopChs map[string]chan struct{} + eventHandlerRegistrations map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration + + queue workqueue.Interface + logger klog.Logger +} + +func NewInformerManager(client dynamic.Interface, ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer) InformerManager { + manager := &informerManager{ + lock: sync.RWMutex{}, + started: false, + client: client, + ftcInformer: ftcInformer, + eventHandlerGenerators: []*EventHandlerGenerator{}, + gvrMapping: map[schema.GroupVersionResource]string{}, + informers: map[string]informers.GenericInformer{}, + informerStopChs: map[string]chan struct{}{}, + eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), + logger: klog.LoggerWithName(klog.Background(), "informer-manager"), + } + + ftcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { manager.enqueue(obj) }, + UpdateFunc: func(_ interface{}, obj interface{}) { manager.enqueue(obj) }, + DeleteFunc: func(obj interface{}) { manager.enqueue(obj) }, + }) + + return manager +} + +func (m *informerManager) enqueue(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + m.logger.Error(err, "Failed to enqueue FederatedTypeConfig") + return + } + m.queue.Add(key) +} + +func (m *informerManager) worker() { + key, shutdown := m.queue.Get() + if shutdown { + return + } + defer m.queue.Done(key) + + _, name, err := cache.SplitMetaNamespaceKey(key.(string)) + if err != nil { + m.logger.Error(err, "Failed to process FederatedTypeConfig") + return + } + + ftc, err := m.ftcInformer.Lister().Get(name) + if apierrors.IsNotFound(err) { + if err := m.processFTCDeletion(name); err != nil { + m.logger.Error(err, "Failed to process FederatedTypeConfig, 
will retry") + m.queue.Add(key) + return + } + return + } + if err != nil { + m.logger.Error(err, "Failed to process FederatedTypeConfig, will retry") + m.queue.Add(key) + return + } + + if err := m.processFTC(ftc); err != nil { + m.logger.Error(err, "Failed to process FederatedTypeConfig, will retry") + m.queue.Add(key) + } +} + +func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) error { + m.lock.Lock() + defer m.lock.Unlock() + + ftcName := ftc.Name + apiResource := ftc.GetSourceType() + gvr := schemautil.APIResourceToGVR(&apiResource) + + m.gvrMapping[gvr] = ftcName + + informer, ok := m.informers[ftcName] + if !ok { + informer = dynamicinformer.NewFilteredDynamicInformer( + m.client, + gvr, + metav1.NamespaceAll, + 0, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + nil, + ) + stopCh := make(chan struct{}) + go informer.Informer().Run(stopCh) + + m.informers[ftcName] = informer + m.informerStopChs[ftcName] = stopCh + m.eventHandlerRegistrations[ftcName] = map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{} + } + + registrations := m.eventHandlerRegistrations[ftcName] + + for _, generator := range m.eventHandlerGenerators { + shouldRegister := generator.Predicate(ftc) + oldRegistration, oldRegistrationExists := registrations[generator] + + switch { + case !shouldRegister && oldRegistrationExists: + if err := informer.Informer().RemoveEventHandler(oldRegistration); err != nil { + return fmt.Errorf("failed to unregister event handler: %w", err) + } + delete(registrations, generator) + + case shouldRegister && !oldRegistrationExists: + handler := generator.Generator(ftc) + newRegistration, err := informer.Informer().AddEventHandler(handler) + if err != nil { + return fmt.Errorf("failed to register event handler: %w", err) + } + registrations[generator] = newRegistration + } + } + + return nil +} + +func (m *informerManager) processFTCDeletion(ftcName string) error { + m.lock.Lock() + defer m.lock.Unlock() + + stopCh, ok := m.informerStopChs[ftcName] + if !ok { + return nil + } + + close(stopCh) + delete(m.informers, ftcName) + delete(m.informerStopChs, ftcName) + delete(m.eventHandlerRegistrations, ftcName) + + for gvr, ftc := range m.gvrMapping { + if ftc == ftcName { + delete(m.gvrMapping, gvr) + } + } + + return nil +} + +func (m *informerManager) AddEventHandlerGenerator(generator *EventHandlerGenerator) error { + m.lock.Lock() + defer m.lock.Unlock() + + if m.started { + return fmt.Errorf("InformerManager is already started.") + } + + m.eventHandlerGenerators = append(m.eventHandlerGenerators, generator) + return nil +} + +func (m *informerManager) GetFederatedTypeConfigLister() v1alpha1.FederatedTypeConfigLister { + return m.ftcInformer.Lister() +} + +func (m *informerManager) GetResourceLister( + gvr schema.GroupVersionResource, +) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + ftc, ok := m.gvrMapping[gvr] + if !ok { + return nil, nil, false + } + + informer, ok := m.informers[ftc] + if !ok { + return nil, nil, false + } + + return informer.Lister(), informer.Informer().HasSynced, true +} + +func (m *informerManager) HasSynced() bool { + return m.ftcInformer.Informer().HasSynced() +} + +func (m *informerManager) Start(ctx context.Context) { + if !cache.WaitForNamedCacheSync("informer-manager", ctx.Done(), m.HasSynced) { + return + } + + m.lock.Lock() + defer m.lock.Unlock() + + if m.started { + m.logger.Error(nil, "InformerManager cannot be 
started more than once") + return + } + + m.started = true + + go wait.Until(m.worker, 0, ctx.Done()) + go func() { + <-ctx.Done() + m.queue.ShutDown() + + m.lock.Lock() + defer m.lock.Unlock() + for _, stopCh := range m.informerStopChs { + close(stopCh) + } + }() +} + +var _ InformerManager = &informerManager{} diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go new file mode 100644 index 00000000..7e0e34f5 --- /dev/null +++ b/pkg/util/informermanager/informermanager_test.go @@ -0,0 +1,775 @@ +package informermanager + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "k8s.io/apimachinery/pkg/runtime/schema" + dynamicclient "k8s.io/client-go/dynamic" + dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/tools/cache" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/fake" + fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" +) + +func TestListerAvailableForExistingFTCs(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + manager, _, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, []*unstructured.Unstructured{}) + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + for _, ftc := range defaultFTCs { + apiresource := ftc.GetSourceType() + gvr := schemautil.APIResourceToGVR(&apiresource) + + g.Eventually(func(g gomega.Gomega) { + lister, informerSynced, exists := manager.GetResourceLister(gvr) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(lister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + } + + // sanity check: gvr without corresponding ftc should not exist + gvr := schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "daemonsets", + } + lister, informerSynced, exists := manager.GetResourceLister(gvr) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(lister).To(gomega.BeNil()) + g.Expect(informerSynced).To(gomega.BeNil()) +} + +func TestListerAvailableForNewFTC(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + manager, _, fedClient := boostrapInformerManagerWithFakeClients([]*fedcorev1a1.FederatedTypeConfig{}, []*unstructured.Unstructured{}) + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + ftc := daemonsetFTC + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{}) + 
g.Expect(err).ToNot(gomega.HaveOccurred()) + + apiresource := ftc.GetSourceType() + gvr := schemautil.APIResourceToGVR(&apiresource) + + g.Eventually(func(g gomega.Gomega) { + lister, informerSynced, exists := manager.GetResourceLister(gvr) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(lister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // sanity check: gvr without corresponding ftc should not exist + gvr = schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "statefulsets", + } + lister, informerSynced, exists := manager.GetResourceLister(gvr) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(lister).To(gomega.BeNil()) + g.Expect(informerSynced).To(gomega.BeNil()) +} + +func TestEventHandlerRegistrationForExistingFTCs(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + dp1 := getDeployment("dp-1", "default") + cm1 := getConfigMap("cm-1", "default") + sc1 := getSecret("sc-1", "default") + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1} + manager, dynamicClient, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) + + registeredResourceEventHandler := &countingResourceEventHandler{} + unregisteredResourceEventHandler := &countingResourceEventHandler{} + + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Generator: registeredResourceEventHandler.generateEventHandler, + }) + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return false }, + Generator: unregisteredResourceEventHandler.generateEventHandler, + }) + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + // 1. If Predicate returns true, EventHandler should be generated and registered. 
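+ //
+ // Note on the expected counts: client-go replays an informer's existing cache to a newly
+ // registered handler as synthetic add events, so the add count below should equal
+ // len(defaultObjects) even though no objects were created after Start.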
+
+ // check event handlers generated and initial events are received
+
+ g.Eventually(func(g gomega.Gomega) {
+ g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs)))
+ g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
+ g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero())
+ g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
+ }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+
+ // check if additional events can be received
+
+ // +1 add
+ sc2 := getSecret("sc-2", "default")
+ sc2, err := dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ // +1 update
+ dp1.SetAnnotations(map[string]string{"test-annotation": "test-value"})
+ dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ // +1 delete
+ err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ // sanity check: events for gvr without corresponding FTC should not be received
+ dm1 := getDaemonSet("dm-1", "default")
+ _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Create(ctx, dm1, metav1.CreateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ g.Eventually(func(g gomega.Gomega) {
+ g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)+1))
+ g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1))
+ g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1))
+ }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+
+ // 2. If Predicate returns false, EventHandler should not be generated and registered.
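+ //
+ // In practice, a Predicate is usually derived from the FTC itself rather than hardcoded; a
+ // minimal sketch (hypothetical, not part of this test) might be:
+ //
+ //	Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool {
+ //		return ftc.GetSourceType().Group == "apps"
+ //	},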
+ + g.Consistently(func(g gomega.Gomega) { + g.Expect(unregisteredResourceEventHandler.getGenerateCount()).To(gomega.BeZero()) + g.Expect(unregisteredResourceEventHandler.getAddEventCount()).To(gomega.BeZero()) + g.Expect(unregisteredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(unregisteredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) +} + +func TestEventHandlerRegistrationForNewFTC(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + dm1 := getDaemonSet("dm-1", "default") + dm2 := getDaemonSet("dm-2", "default") + dm3 := getDaemonSet("dm-3", "default") + dm4 := getDaemonSet("dm-4", "default") + + defaultObjects := []*unstructured.Unstructured{dm1, dm2, dm3, dm4} + manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients([]*fedcorev1a1.FederatedTypeConfig{}, defaultObjects) + + registeredResourceEventHandler := &countingResourceEventHandler{} + unregisteredResourceEventHandler := &countingResourceEventHandler{} + + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Generator: registeredResourceEventHandler.generateEventHandler, + }) + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return false }, + Generator: unregisteredResourceEventHandler.generateEventHandler, + }) + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + ftc := daemonsetFTC + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 1. If Predicate returns true, a new EventHandler should be generated and registered. 
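+ //
+ // Unlike the previous test, this FTC is created after Start, so the manager has to spin up a
+ // brand-new informer on the fly; the pre-existing daemonsets should then be delivered as
+ // synthetic add events once that informer's cache syncs.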
+
+ // check event handlers generated and initial events are received
+
+ g.Eventually(func(g gomega.Gomega) {
+ g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
+ g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
+ g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero())
+ g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
+ }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+
+ // check if additional events can be received
+
+ // +2 update
+ dm1.SetAnnotations(map[string]string{"test-annotation": "test-value"})
+ dm1, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+ dm2.SetAnnotations(map[string]string{"test-annotation": "test-value"})
+ dm2, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm2, metav1.UpdateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ // +1 delete
+ err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ // sanity check: events for gvr without corresponding FTC should not be received
+ sc1 := getSecret("sc-1", "default")
+ _, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc1, metav1.CreateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ g.Eventually(func(g gomega.Gomega) {
+ g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
+ g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 2))
+ g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1))
+ }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+
+ // 2. If Predicate returns false, no new EventHandlers should be generated and registered.
+ + g.Consistently(func(g gomega.Gomega) { + g.Expect(unregisteredResourceEventHandler.getGenerateCount()).To(gomega.BeZero()) + g.Expect(unregisteredResourceEventHandler.getAddEventCount()).To(gomega.BeZero()) + g.Expect(unregisteredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(unregisteredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) +} + +func TestEventHandlerRegistrationOnFTCUpdate(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + ftc := deploymentFTC.DeepCopy() + ftc.SetAnnotations(map[string]string{"predicate": "false"}) + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, []*unstructured.Unstructured{}) + + registeredResourceEventHandler := &countingResourceEventHandler{} + + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { + anno := ftc.GetAnnotations() + return anno != nil && anno["predicate"] == "true" + }, + Generator: registeredResourceEventHandler.generateEventHandler, + }) + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + // sanity check: no event handler should have been generated and no events should have been received + g.Consistently(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeZero()) + g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeZero()) + g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // 1. If Predicate returns true and there is no existing EventHandler, a new EventHandler should be generated and + // registered. + + ftc.SetAnnotations(map[string]string{"predicate": "true"}) + ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + // sleep for a second to allow the InformerManager to process the ftc update + <-time.After(time.Second) + + dp1 := getDeployment("dp-1", "default") + dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp1, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + g.Eventually(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // 2. If Predicate returns true and there is an existing EventHandler, a new EventHandler should not be generated but + // the existing EventHandler should remain registered. 
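+ //
+ // Rationale: regenerating the handler on every FTC update would replay the entire informer
+ // cache to the new handler as add events (see the EventHandlerGenerator docs), so the manager
+ // deliberately keeps the existing registration and the generate count should stay at 1.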
+ + ftc.SetAnnotations(map[string]string{"predicate": "true", "update-trigger": "1"}) + ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + // sleep for a second to allow the InformerManager to process the ftc update + <-time.After(time.Second) + + g.Consistently(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + dp1.SetAnnotations(map[string]string{"test-annotation": "test-value"}) + dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + g.Eventually(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // 3. If Predicate returns false and there is an existing EventHandler, the existing EventHandler should be + // unregistered. + + ftc.SetAnnotations(map[string]string{"predicate": "false"}) + ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + // sleep for a second to allow the InformerManager to process the ftc update + <-time.After(time.Second) + + // events should no longer be received for deployments + dp2 := getDeployment("dp-2", "default") + dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) + dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp1.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + g.Consistently(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // 4. If Predicate returns false and there is no existing EventHandler, no new EventHandlers should be generated and + // registered. 
+ + ftc.SetAnnotations(map[string]string{"predicate": "false", "update-trigger": "1"}) + ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + // sleep for a second to allow the InformerManager to process the ftc update + <-time.After(time.Second) + + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + g.Consistently(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) +} + +func TestEventHandlerRegistrationOnFTCDelete(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + dp1 := getDeployment("dp-1", "default") + cm1 := getConfigMap("cm-1", "default") + sc1 := getSecret("sc-1", "default") + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1} + manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) + + registeredResourceEventHandler := &countingResourceEventHandler{} + + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Generator: registeredResourceEventHandler.generateEventHandler, + }) + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + // All existing EventHandlers for the FTC should be unregistered or stop receiving events. 
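+ //
+ // With the current implementation, this works because processFTCDeletion closes the FTC's
+ // informer stop channel instead of removing handlers one by one, so registered handlers
+ // simply stop receiving events.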
+ + // sanity check: event handlers generated and initial events are received + + g.Eventually(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) + g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // delete deployment ftc + err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Delete(ctx, deploymentFTC.Name, metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // sleep for a second to allow the InformerManager to process the ftc deletion + <-time.After(time.Second) + + // events should no longer be received for deployments + dp2 := getDeployment("dp-2", "default") + dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) + dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp1.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + g.Consistently(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) + g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // sanity check: events should still be received for the other remaining ftcs' source types + + // +1 add + sc2 := getSecret("sc-2", "default") + sc2, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // +1 delete + err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + g.Eventually(func(g gomega.Gomega) { + g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)+1)) + g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) +} + +func TestEventHandlerRegistrationAfterInformerShutdown(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + dp1 := getDeployment("dp-1", "default") + cm1 := getConfigMap("cm-1", "default") + sc1 := getSecret("sc-1", "default") + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1} + manager, dynamicClient, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) + + 
registeredResourceEventHandler := &countingResourceEventHandler{}
+
+ manager.AddEventHandlerGenerator(&EventHandlerGenerator{
+ Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true },
+ Generator: registeredResourceEventHandler.generateEventHandler,
+ })
+
+ ctx, managerCancel := context.WithCancel(context.Background())
+ manager.Start(ctx)
+
+ ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3)
+ defer cancel()
+ if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) {
+ g.Fail("Timed out waiting for InformerManager cache sync")
+ }
+
+ // After the manager is stopped, all registered EventHandlers should stop receiving events.
+
+ // sanity check: event handlers generated and initial events are received
+
+ g.Eventually(func(g gomega.Gomega) {
+ g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs)))
+ g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
+ g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero())
+ g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
+ }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+
+ // stop manager
+ managerCancel()
+ // sleep for a second to allow the InformerManager to process the shutdown
+ <-time.After(time.Second)
+
+ // events should no longer be received for any ftc's source type
+ dp2 := getDeployment("dp-2", "default")
+ dp2, err := dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ cm2 := getConfigMap("cm-2", "default")
+ cm2, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Create(ctx, cm2, metav1.CreateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ sc2 := getSecret("sc-2", "default")
+ sc2, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{})
+ g.Expect(err).ToNot(gomega.HaveOccurred())
+
+ g.Consistently(func(g gomega.Gomega) {
+ g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs)))
+ g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
+ g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero())
+ g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
+ }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+}
+
+func boostrapInformerManagerWithFakeClients(
+ ftcs []*fedcorev1a1.FederatedTypeConfig,
+ objects []*unstructured.Unstructured,
+) (InformerManager, dynamicclient.Interface, fedclient.Interface) {
+ scheme := runtime.NewScheme()
+
+ corev1.AddToScheme(scheme)
+ appsv1.AddToScheme(scheme)
+ fedcorev1a1.AddToScheme(scheme)
+
+ dynamicObjects := []runtime.Object{}
+ for _, object := range objects {
+ dynamicObjects = append(dynamicObjects, runtime.Object(object))
+ }
+ dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects...)
+
+ fedObjects := []runtime.Object{}
+ for _, ftc := range ftcs {
+ fedObjects = append(fedObjects, runtime.Object(ftc))
+ }
+ fedClient := fake.NewSimpleClientset(fedObjects...)
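+
+ // The fake clientsets serve the seeded objects and propagate subsequent mutations through
+ // their watch implementations, which is what drives the informers in these tests without a
+ // real API server.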
+ + factory := fedinformers.NewSharedInformerFactory(fedClient, 0) + informerManager := NewInformerManager(dynamicClient, factory.Core().V1alpha1().FederatedTypeConfigs()) + + factory.Start(context.TODO().Done()) + + return informerManager, dynamicClient, fedClient +} + +type countingResourceEventHandler struct { + lock sync.RWMutex + + generateCount int + + addEventCount int + updateEventCount int + deleteEventCount int +} + +func (h *countingResourceEventHandler) getAddEventCount() int { + h.lock.RLock() + defer h.lock.RUnlock() + return h.addEventCount +} + +func (h *countingResourceEventHandler) getUpdateEventCount() int { + h.lock.RLock() + defer h.lock.RUnlock() + return h.updateEventCount +} + +func (h *countingResourceEventHandler) getDeleteEventCount() int { + h.lock.RLock() + defer h.lock.RUnlock() + return h.deleteEventCount +} + +func (h *countingResourceEventHandler) getGenerateCount() int { + h.lock.RLock() + defer h.lock.RUnlock() + return h.generateCount +} + +func (h *countingResourceEventHandler) generateEventHandler(_ *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + h.lock.Lock() + defer h.lock.Unlock() + h.generateCount++ + return h +} + +func (h *countingResourceEventHandler) OnAdd(_ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.addEventCount++ +} + +func (h *countingResourceEventHandler) OnDelete(_ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.deleteEventCount++ +} + +func (h *countingResourceEventHandler) OnUpdate(_ interface{}, _ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.updateEventCount++ +} + +var _ cache.ResourceEventHandler = &countingResourceEventHandler{} + +func getDeployment(name, namespace string) *unstructured.Unstructured { + dp := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + dpMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dp) + if err != nil { + panic(err) + } + + return &unstructured.Unstructured{Object: dpMap} +} + +func getConfigMap(name, namespace string) *unstructured.Unstructured { + cm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + cmMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cm) + if err != nil { + panic(err) + } + + return &unstructured.Unstructured{Object: cmMap} +} + +func getSecret(name, namespace string) *unstructured.Unstructured { + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + secretMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(secret) + if err != nil { + panic(err) + } + + return &unstructured.Unstructured{Object: secretMap} +} + +func getDaemonSet(name, namespace string) *unstructured.Unstructured { + dm := &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + dmMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dm) + if err != nil { + panic(err) + } + + return &unstructured.Unstructured{Object: dmMap} +} + +var ( + daemonsetFTC = &fedcorev1a1.FederatedTypeConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "daemonsets", + }, + Spec: fedcorev1a1.FederatedTypeConfigSpec{ + SourceType: 
fedcorev1a1.APIResource{
+ Group: "apps",
+ Version: "v1",
+ Kind: "DaemonSet",
+ PluralName: "daemonsets",
+ Scope: v1beta1.NamespaceScoped,
+ },
+ },
+ }
+ deploymentFTC = &fedcorev1a1.FederatedTypeConfig{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "deployments",
+ },
+ Spec: fedcorev1a1.FederatedTypeConfigSpec{
+ SourceType: fedcorev1a1.APIResource{
+ Group: "apps",
+ Version: "v1",
+ Kind: "Deployment",
+ PluralName: "deployments",
+ Scope: v1beta1.NamespaceScoped,
+ },
+ },
+ }
+ configmapFTC = &fedcorev1a1.FederatedTypeConfig{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "configmaps",
+ },
+ Spec: fedcorev1a1.FederatedTypeConfigSpec{
+ SourceType: fedcorev1a1.APIResource{
+ Group: "",
+ Version: "v1",
+ Kind: "ConfigMap",
+ PluralName: "configmaps",
+ Scope: v1beta1.NamespaceScoped,
+ },
+ },
+ }
+ secretFTC = &fedcorev1a1.FederatedTypeConfig{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "secrets",
+ },
+ Spec: fedcorev1a1.FederatedTypeConfigSpec{
+ SourceType: fedcorev1a1.APIResource{
+ Group: "",
+ Version: "v1",
+ Kind: "Secret",
+ PluralName: "secrets",
+ Scope: v1beta1.NamespaceScoped,
+ },
+ },
+ }
+)
diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go
index 52bf5d4d..10b4e422 100644
--- a/pkg/util/informermanager/interface.go
+++ b/pkg/util/informermanager/interface.go
@@ -14,24 +14,28 @@ import (
 // EventHandlerGenerator is used by InformerManager and FederatedInformerManager to generate and register
 // ResourceEventHandlers for each FTC's source type informer.
 type EventHandlerGenerator struct {
- // Predicate is called for each FTC add or update event. Predicate should return True only if a new
- // ResoureEventHandler should be generated and registered for the given FTC. Previous event handlers registered for
- // this EventHandlerGenerator will also be removed.
+ // Predicate is called each time an FTC is reconciled to determine if an event handler needs to be registered for
+ // this EventHandlerGenerator. If Predicate returns false, any previously registered event handler for this
+ // EventHandlerGenerator will also be unregistered.
 //
- // Note: we should be cautious about registering new ResourceEventHandler as it will receive synthetic add events
- // for every object in the informer's cache.
- Predicate func(oldFTC, newFTC *fedcorev1a1.FederatedTypeConfig) bool
- // Generator is used to generate a ResourceEventHandler for the given FTC. If nil is returned, no
- // ResourceEventHandler will be registered.
+ // Note: updating of event handlers is intentionally unsupported, as registering a new event handler would cause all
+ // existing objects in the cache to be sent to it as add events, potentially causing performance problems. In other
+ // words, if Predicate returns true and there is already a registered event handler for this EventHandlerGenerator,
+ // a new event handler will not be generated.
+ Predicate func(ftc *fedcorev1a1.FederatedTypeConfig) bool
+ // Generator is used to generate a ResourceEventHandler for the given FTC. Generator MUST NOT return nil.
 Generator func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler
 }
 
 // InformerManager provides an interface for controllers that need to dynamically register event handlers and access
 // objects based on FederatedTypeConfigs. InformerManager will listen to FTC events and maintain informers for the
 // source type of each FTC.
+//
+// Having multiple FTCs with the same source type is not supported and will cause InformerManager to behave incorrectly.
+// Updating FTC source types is also not supported and will cause InformerManager to behave incorrectly.
 type InformerManager interface {
 // Adds an EventHandler used to generate and register ResourceEventHandlers for each FTC's source type informer.
- AddEventHandlerGenerator(generator EventHandlerGenerator) error
+ AddEventHandlerGenerator(generator *EventHandlerGenerator) error
 // Returns a lister for the given GroupVersionResource if it exists. The lister for each FTC's source type will
 // eventually exist.
 GetResourceLister(gvr schema.GroupVersionResource) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool)
@@ -52,7 +56,7 @@ type ClusterEventHandler struct {
 // ClusterEventHandler should be called for the given event.
 Predicate ClusterEventPredicate
 // Callback is a function that accepts a FederatedCluster object.
- Callback func(cluster *fedcorev1a1.FederatedCluster)
+ Callback func(cluster *fedcorev1a1.FederatedCluster)
 }
 
@@ -61,9 +65,13 @@ type ClusterEventPredicate func(oldCluster, newCluster *fedcorev1a1.FederatedClu
 // FederatedInformerManager provides an interface for controllers that need to dynamically register event handlers and
 // access objects in member clusters based on FederatedTypeConfigs. FederatedInformerManager will listen to FTC events
 // and maintain informers for each FTC's source type and joined member cluster.
+//
+// Having multiple FTCs with the same source type is not supported and will cause FederatedInformerManager to behave
+// incorrectly. Updating FTC source types is also not supported and will cause FederatedInformerManager to behave
+// incorrectly.
 type FederatedInformerManager interface {
 // Adds an EventHandler used to generate and register ResourceEventHandlers for each FTC's source type informer.
- AddEventHandlerGenerator(generator EventHandlerGenerator) error
+ AddEventHandlerGenerator(generator *EventHandlerGenerator) error
 // Returns a lister for the given GroupVersionResource and cluster if it exists. The lister for each FTC's source
 // type and cluster will eventually exist.
GetResourceLister( From fde4b01e7d487582be5f165712b0f165f0089182 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 7 Jul 2023 10:55:45 +0800 Subject: [PATCH 010/173] start federatedinformer implementation --- .../federatedinformermanager.go | 243 ++++++++++++++++-- .../federatedinformermanager_test.go | 0 pkg/util/informermanager/interface.go | 6 +- 3 files changed, 223 insertions(+), 26 deletions(-) create mode 100644 pkg/util/informermanager/federatedinformermanager_test.go diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index a19d2de4..aefe7e00 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -2,52 +2,247 @@ package informermanager import ( "context" - "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + "fmt" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" + fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/controllers/util" ) -type federatedInformerManager struct{} +type federatedInformerManager struct { + lock sync.RWMutex -// AddClusterEventHandler implements FederatedInformerManager. -func (*federatedInformerManager) AddClusterEventHandler(handler ClusterEventHandler) error { - panic("unimplemented") + started bool + + clientGetter ClusterClientGetter + ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer + clusterInformer fedcorev1a1informers.FederatedClusterInformer + + eventHandlerGenerators []*EventHandlerGenerator + clusterEventHandler []*ClusterEventHandler + + clients map[string]dynamic.Interface + informerManagers map[string]InformerManager + informerManagersCancelFuncs map[string]context.CancelFunc + + queue workqueue.Interface + logger klog.Logger } -// AddEventHandlerGenerator implements FederatedInformerManager. 
-func (*federatedInformerManager) AddEventHandlerGenerator(generator *EventHandlerGenerator) error { - panic("unimplemented") +func NewFederatedInformerManager( + clientGetter ClusterClientGetter, + ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer, + clusterInformer fedcorev1a1informers.FederatedClusterInformer, +) FederatedInformerManager { + manager := &federatedInformerManager{ + lock: sync.RWMutex{}, + started: false, + clientGetter: clientGetter, + ftcInformer: ftcInformer, + clusterInformer: clusterInformer, + eventHandlerGenerators: []*EventHandlerGenerator{}, + clusterEventHandler: []*ClusterEventHandler{}, + clients: map[string]dynamic.Interface{}, + informerManagers: map[string]InformerManager{}, + informerManagersCancelFuncs: map[string]context.CancelFunc{}, + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), + logger: klog.LoggerWithName(klog.Background(), "federated-informer-manager"), + } + + clusterInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + cluster := obj.(*fedcorev1a1.FederatedCluster) + return util.IsClusterJoined(&cluster.Status) + }, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { manager.enqueue(obj) }, + UpdateFunc: func(_ interface{}, obj interface{}) { manager.enqueue(obj) }, + DeleteFunc: func(obj interface{}) { manager.enqueue(obj) }, + }, + }) + + return manager } -// GetClusterClient implements FederatedInformerManager. -func (*federatedInformerManager) GetClusterClient(cluster string) (dynamic.Interface, bool) { - panic("unimplemented") +func (m *federatedInformerManager) enqueue(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + m.logger.Error(err, "Failed to enqueue FederatedCluster") + return + } + m.queue.Add(key) } -// GetFederatedClusterLister implements FederatedInformerManager. -func (*federatedInformerManager) GetFederatedClusterLister() v1alpha1.FederatedClusterLister { - panic("unimplemented") +func (m *federatedInformerManager) worker() { + key, shutdown := m.queue.Get() + if shutdown { + return + } + defer m.queue.Done(key) + + _, name, err := cache.SplitMetaNamespaceKey(key.(string)) + if err != nil { + m.logger.Error(err, "Failed to process FederatedCluster") + return + } + + cluster, err := m.clusterInformer.Lister().Get(name) + if err != nil && !apierrors.IsNotFound(err) { + m.logger.Error(err, "Failed to process FederatedCluster, will retry") + m.queue.Add(key) + return + } + if apierrors.IsNotFound(err) || !util.IsClusterJoined(&cluster.Status) { + if err := m.processClusterUnjoin(name); err != nil { + m.logger.Error(err, "Failed to process FederatedCluster, will retry") + m.queue.Add(key) + return + } + return + } + + if err := m.processCluster(cluster); err != nil { + m.logger.Error(err, "Failed to process FederatedCluster, will retry") + m.queue.Add(key) + } } -// GetFederatedTypeConfigLister implements FederatedInformerManager. -func (*federatedInformerManager) GetFederatedTypeConfigLister() v1alpha1.FederatedTypeConfigLister { +func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.FederatedCluster) error { + m.lock.Lock() + defer m.lock.Unlock() + panic("unimplemented") } -// GetResourceLister implements FederatedInformerManager. 
-func (*federatedInformerManager) GetResourceLister(gvr schema.GroupVersionResource, cluster string) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) { +func (m *federatedInformerManager) processClusterUnjoin(clusterName string) error { + m.lock.Lock() + defer m.lock.Unlock() + panic("unimplemented") } -// HasSynced implements FederatedInformerManager. -func (*federatedInformerManager) HasSynced() bool { - panic("unimplemented") +func (m *federatedInformerManager) AddClusterEventHandler(handler *ClusterEventHandler) error { + m.lock.Lock() + defer m.lock.Unlock() + + if m.started { + return fmt.Errorf("FederatedInformerManager is already started.") + } + + m.clusterEventHandler = append(m.clusterEventHandler, handler) + return nil + } -// Start implements FederatedInformerManager. -func (*federatedInformerManager) Start(ctx context.Context) { - panic("unimplemented") +func (m *federatedInformerManager) AddEventHandlerGenerator(generator *EventHandlerGenerator) error { + m.lock.Lock() + defer m.lock.Unlock() + + if m.started { + return fmt.Errorf("FederatedInformerManager is already started.") + } + + m.eventHandlerGenerators = append(m.eventHandlerGenerators, generator) + return nil +} + +func (m *federatedInformerManager) GetClusterClient(cluster string) (client dynamic.Interface, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + client, ok := m.clients[cluster] + return client, ok +} + +func (m *federatedInformerManager) GetFederatedClusterLister() fedcorev1a1listers.FederatedClusterLister { + return m.clusterInformer.Lister() +} + +func (m *federatedInformerManager) GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister { + return m.ftcInformer.Lister() +} + +func (m *federatedInformerManager) GetResourceLister( + gvr schema.GroupVersionResource, + cluster string, +) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + manager, ok := m.informerManagers[cluster] + if !ok { + return nil, nil, false + } + + return manager.GetResourceLister(gvr) +} + +func (m *federatedInformerManager) HasSynced() bool { + return m.ftcInformer.Informer().HasSynced() && m.clusterInformer.Informer().HasSynced() +} + +func (m *federatedInformerManager) Start(ctx context.Context) { + if !cache.WaitForNamedCacheSync("federated-informer-manager", ctx.Done(), m.HasSynced) { + return + } + + m.lock.Lock() + defer m.lock.Unlock() + + if m.started { + m.logger.Error(nil, "FederatedInformerManager cannot be started more than once") + return + } + + m.started = true + + for _, handler := range m.clusterEventHandler { + predicate := handler.Predicate + callback := handler.Callback + + m.clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + cluster := obj.(*fedcorev1a1.FederatedCluster) + if predicate(nil, cluster) { + callback(cluster) + } + }, + UpdateFunc: func(oldObj interface{}, newObj interface{}) { + oldCluster := oldObj.(*fedcorev1a1.FederatedCluster) + newCluster := newObj.(*fedcorev1a1.FederatedCluster) + if predicate(oldCluster, newCluster) { + callback(newCluster) + } + }, + DeleteFunc: func(obj interface{}) { + cluster := obj.(*fedcorev1a1.FederatedCluster) + if predicate(cluster, nil) { + callback(cluster) + } + }, + }) + } + + go wait.Until(m.worker, 0, ctx.Done()) + go func() { + <-ctx.Done() + m.queue.ShutDown() + + m.lock.Lock() + defer m.lock.Unlock() + for _, cancelFunc := range m.informerManagersCancelFuncs { + 
cancelFunc() + } + }() } var _ FederatedInformerManager = &federatedInformerManager{} diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go new file mode 100644 index 00000000..e69de29b diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index 10b4e422..61342cc1 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -79,7 +79,7 @@ type FederatedInformerManager interface { cluster string, ) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) // Returns a client for the given cluster if it exists. The client for each cluster will eventually exist. - GetClusterClient(cluster string) (dynamic.Interface, bool) + GetClusterClient(cluster string) (client dynamic.Interface, exists bool) // Returns the FederatedTypeConfig lister used by the FederatedInformerManager. GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister @@ -90,8 +90,10 @@ type FederatedInformerManager interface { // Adds a ClusterEventHandler that can be used by controllers to hook into the cluster events received by the // FederatedInformerManager. - AddClusterEventHandler(handler ClusterEventHandler) error + AddClusterEventHandler(handler *ClusterEventHandler) error // Starts processing FederatedTypeConfig and FederatedCluster events. Start(ctx context.Context) } + +type ClusterClientGetter func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) From a52252052f59d57851e312815cda0e3eb0d5f2a6 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 7 Jul 2023 14:18:26 +0800 Subject: [PATCH 011/173] add bijection map to handle source type changes --- pkg/controllers/util/overrides.go | 232 --- pkg/controllers/util/propagatedversion.go | 270 +-- pkg/controllers/util/rolloutplan.go | 1698 ++++++++--------- .../federatedinformermanager.go | 39 +- .../federatedinformermanager_test.go | 19 + pkg/util/informermanager/informermanager.go | 63 +- pkg/util/tools/bijection.go | 93 + 7 files changed, 1174 insertions(+), 1240 deletions(-) delete mode 100644 pkg/controllers/util/overrides.go create mode 100644 pkg/util/tools/bijection.go diff --git a/pkg/controllers/util/overrides.go b/pkg/controllers/util/overrides.go deleted file mode 100644 index 383fb3cf..00000000 --- a/pkg/controllers/util/overrides.go +++ /dev/null @@ -1,232 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. 
-*/ - -package util - -import ( - "encoding/json" - "sort" - - jsonpatch "github.com/evanphx/json-patch" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/sets" - - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -) - -// Namespace and name may not be overridden since these fields are the -// primary mechanism of association between a federated resource in -// the host cluster and the target resources in the member clusters. -// -// Kind should always be sourced from the FTC and not vary across -// member clusters. -// -// apiVersion can be overridden to support managing resources like -// Ingress which can exist in different groups at different -// versions. Users will need to take care not to abuse this -// capability. -var invalidPaths = sets.NewString( - "/metadata/namespace", - "/metadata/name", - "/metadata/generateName", - "/kind", -) - -// Mapping of clusterName to overrides for the cluster -type OverridesMap map[string]fedtypesv1a1.OverridePatches - -func UnmarshalGenericOverrides(uns *unstructured.Unstructured) (*fedtypesv1a1.GenericObjectWithOverrides, error) { - obj := &fedtypesv1a1.GenericObjectWithOverrides{} - err := UnstructuredToInterface(uns, obj) - if err != nil { - return nil, err - } - return obj, nil -} - -// GetOverrides returns a map of overrides populated from the given -// unstructured object. -func GetOverrides(rawObj *unstructured.Unstructured, controller string) (OverridesMap, error) { - overridesMap := make(OverridesMap) - - if rawObj == nil { - return overridesMap, nil - } - - overrideObj, err := UnmarshalGenericOverrides(rawObj) - if err != nil { - return nil, err - } - - if overrideObj.Spec == nil || overrideObj.Spec.Overrides == nil { - // No overrides defined for the federated type - return overridesMap, nil - } - - overrides := overrideObj.Spec.Overrides - var clusterOverrides []fedtypesv1a1.ClusterOverride - for i := range overrides { - if overrides[i].Controller == controller { - clusterOverrides = overrides[i].Clusters - break - } - } - - if clusterOverrides == nil { - return overridesMap, nil - } - - for _, overrideItem := range clusterOverrides { - clusterName := overrideItem.ClusterName - if _, ok := overridesMap[clusterName]; ok { - return nil, errors.Errorf("cluster %q appears more than once", clusterName) - } - - for i, pathEntry := range overrideItem.Patches { - path := pathEntry.Path - if invalidPaths.Has(path) { - return nil, errors.Errorf("override[%d] for cluster %q has an invalid path: %s", i, clusterName, path) - } - } - overridesMap[clusterName] = overrideItem.Patches - } - - return overridesMap, nil -} - -// SetOverrides sets the spec.overrides field of the unstructured -// object from the provided overrides map. -// -// This function takes ownership of the `overridesMap` and may mutate it arbitrarily. 
-func SetOverrides(uns *unstructured.Unstructured, controller string, overridesMap OverridesMap) error { - for clusterName, clusterOverrides := range overridesMap { - if len(clusterOverrides) == 0 { - delete(overridesMap, clusterName) - } - } - - overrideObj, err := UnmarshalGenericOverrides(uns) - if err != nil { - return err - } - - index := -1 - for i, overrides := range overrideObj.Spec.Overrides { - if overrides.Controller == controller { - index = i - break - } - } - - if len(overridesMap) == 0 { - // delete index - if index != -1 { - overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides[:index], overrideObj.Spec.Overrides[(index+1):]...) - } - } else { - if index == -1 { - index = len(overrideObj.Spec.Overrides) - overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides, fedtypesv1a1.ControllerOverride{ - Controller: controller, - }) - } - - overrides := &overrideObj.Spec.Overrides[index] - overrides.Clusters = nil - - // Write in ascending order of cluster names for better readability - clusterNames := make([]string, 0, len(overridesMap)) - for clusterName := range overridesMap { - clusterNames = append(clusterNames, clusterName) - } - sort.Strings(clusterNames) - for _, clusterName := range clusterNames { - clusterOverrides := overridesMap[clusterName] - overrides.Clusters = append(overrides.Clusters, fedtypesv1a1.ClusterOverride{ - ClusterName: clusterName, - Patches: clusterOverrides, - }) - } - } - - overridesUns, err := InterfaceToUnstructured(overrideObj.Spec.Overrides) - if err != nil { - return err - } - - return unstructured.SetNestedField(uns.Object, overridesUns, common.OverridesPath...) -} - -// UnstructuredToInterface converts an unstructured object to the -// provided interface by json marshalling/unmarshalling. -func UnstructuredToInterface(rawObj *unstructured.Unstructured, obj interface{}) error { - content, err := rawObj.MarshalJSON() - if err != nil { - return err - } - return json.Unmarshal(content, obj) -} - -// InterfaceToUnstructured converts the provided object to an -// unstructured by json marshalling/unmarshalling. -func InterfaceToUnstructured(obj interface{}) (ret interface{}, err error) { - var buf []byte - buf, err = json.Marshal(obj) - if err != nil { - return - } - - err = json.Unmarshal(buf, &ret) - return -} - -// ApplyJsonPatch applies the override on to the given unstructured object. -func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedtypesv1a1.OverridePatches) error { - // TODO: Do the defaulting of "op" field to "replace" in API defaulting - for i, overrideItem := range overrides { - if overrideItem.Op == "" { - overrides[i].Op = "replace" - } - } - jsonPatchBytes, err := json.Marshal(overrides) - if err != nil { - return err - } - - patch, err := jsonpatch.DecodePatch(jsonPatchBytes) - if err != nil { - return err - } - - ObjectJSONBytes, err := obj.MarshalJSON() - if err != nil { - return err - } - - patchedObjectJSONBytes, err := patch.Apply(ObjectJSONBytes) - if err != nil { - return err - } - - err = obj.UnmarshalJSON(patchedObjectJSONBytes) - return err -} diff --git a/pkg/controllers/util/propagatedversion.go b/pkg/controllers/util/propagatedversion.go index 4df3d4c6..77292917 100644 --- a/pkg/controllers/util/propagatedversion.go +++ b/pkg/controllers/util/propagatedversion.go @@ -20,138 +20,138 @@ are Copyright 2023 The KubeAdmiral Authors. 
package util -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" -) - -const ( - generationPrefix = "gen:" - resourceVersionPrefix = "rv:" -) - -// ObjectVersion retrieves the field type-prefixed value used for -// determining currency of the given cluster object. -func ObjectVersion(clusterObj *unstructured.Unstructured) string { - generation := clusterObj.GetGeneration() - if generation != 0 { - return fmt.Sprintf("%s%d", generationPrefix, generation) - } - return fmt.Sprintf("%s%s", resourceVersionPrefix, clusterObj.GetResourceVersion()) -} - -// ObjectNeedsUpdate determines whether the 2 objects provided cluster -// object needs to be updated according to the desired object and the -// recorded version. -func ObjectNeedsUpdate( - desiredObj, clusterObj *unstructured.Unstructured, - recordedVersion string, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) bool { - targetVersion := ObjectVersion(clusterObj) - - if recordedVersion != targetVersion { - return true - } - - needUpdate := true - if desiredReplicas, err := utilunstructured.GetInt64FromPath(desiredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { - if currentReplicas, err := utilunstructured.GetInt64FromPath(clusterObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { - if desiredReplicas == nil && currentReplicas == nil || - desiredReplicas != nil && currentReplicas != nil && *desiredReplicas == *currentReplicas { - needUpdate = false - } - } - } - if needUpdate { - return true - } - - needUpdate = true - if desiredMaxSurge, ok, err := unstructured.NestedString(desiredObj.Object, MaxSurgePathSlice...); err == nil { - if currentMaxSurge, ok2, err := unstructured.NestedString(clusterObj.Object, MaxSurgePathSlice...); err == nil && - ok == ok2 { - if desiredMaxSurge == currentMaxSurge { - needUpdate = false - } - } - } else if desiredMaxSurge, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxSurgePathSlice...); err == nil { - if currentMaxSurge, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxSurgePathSlice...); err == nil && ok == ok2 { - if desiredMaxSurge == currentMaxSurge { - needUpdate = false - } - } - } - if needUpdate { - return true - } - - needUpdate = true - if desiredMaxUnavailable, ok, err := unstructured.NestedString(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { - if currentMaxUnavailable, ok2, err := unstructured.NestedString(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && - ok == ok2 { - if desiredMaxUnavailable == currentMaxUnavailable { - needUpdate = false - } - } - } else if desiredMaxUnavailable, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { - if currentMaxUnavailable, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && ok == ok2 { - if desiredMaxUnavailable == currentMaxUnavailable { - needUpdate = false - } - } - } - if needUpdate { - return true - } - // If versions match and the version is sourced from the - // generation field, a further check of metadata equivalency is - // required. - return strings.HasPrefix(targetVersion, generationPrefix) && !ObjectMetaObjEquivalent(desiredObj, clusterObj) -} - -// SortClusterVersions ASCII sorts the given cluster versions slice -// based on cluster name. 
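To make the removed version scheme concrete — ObjectVersion prefers metadata.generation and falls back to resourceVersion — a small sketch with made-up objects:

// Sketch: which prefix ObjectVersion produces for which kind of object.
dp := &unstructured.Unstructured{Object: map[string]interface{}{}}
dp.SetGeneration(3)
_ = ObjectVersion(dp) // "gen:3" — spec changes bump the generation

cm := &unstructured.Unstructured{Object: map[string]interface{}{}}
cm.SetResourceVersion("12345")
_ = ObjectVersion(cm) // "rv:12345" — objects without a generation fall back to resourceVersion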
-func SortClusterVersions(versions []fedcorev1a1.ClusterObjectVersion) { - sort.Slice(versions, func(i, j int) bool { - return versions[i].ClusterName < versions[j].ClusterName - }) -} - -// PropagatedVersionStatusEquivalent returns true if both statuses are equal by -// comparing Template and Override version, and their ClusterVersion slices; -// false otherwise. -func PropagatedVersionStatusEquivalent(pvs1, pvs2 *fedcorev1a1.PropagatedVersionStatus) bool { - return pvs1.TemplateVersion == pvs2.TemplateVersion && - pvs1.OverrideVersion == pvs2.OverrideVersion && - reflect.DeepEqual(pvs1.ClusterVersions, pvs2.ClusterVersions) -} - -func ConvertVersionMapToGenerationMap(versionMap map[string]string) map[string]int64 { - generationMap := make(map[string]int64, len(versionMap)) - for key, version := range versionMap { - if strings.HasPrefix(version, resourceVersionPrefix) { - generationMap[key] = 0 - continue - } - if !strings.HasPrefix(version, generationPrefix) { - continue - } - - generationString := strings.TrimPrefix(version, generationPrefix) - generation, err := strconv.ParseInt(generationString, 10, 64) - if err != nil { - continue - } - generationMap[key] = generation - } - return generationMap -} +// import ( +// "fmt" +// "reflect" +// "sort" +// "strconv" +// "strings" + +// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + +// fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" +// utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" +// ) + +// const ( +// generationPrefix = "gen:" +// resourceVersionPrefix = "rv:" +// ) + +// // ObjectVersion retrieves the field type-prefixed value used for +// // determining currency of the given cluster object. +// func ObjectVersion(clusterObj *unstructured.Unstructured) string { +// generation := clusterObj.GetGeneration() +// if generation != 0 { +// return fmt.Sprintf("%s%d", generationPrefix, generation) +// } +// return fmt.Sprintf("%s%s", resourceVersionPrefix, clusterObj.GetResourceVersion()) +// } + +// // ObjectNeedsUpdate determines whether the 2 objects provided cluster +// // object needs to be updated according to the desired object and the +// // recorded version. 
+// func ObjectNeedsUpdate( +// desiredObj, clusterObj *unstructured.Unstructured, +// recordedVersion string, +// typeConfig *fedcorev1a1.FederatedTypeConfig, +// ) bool { +// targetVersion := ObjectVersion(clusterObj) + +// if recordedVersion != targetVersion { +// return true +// } + +// needUpdate := true +// if desiredReplicas, err := utilunstructured.GetInt64FromPath(desiredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { +// if currentReplicas, err := utilunstructured.GetInt64FromPath(clusterObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { +// if desiredReplicas == nil && currentReplicas == nil || +// desiredReplicas != nil && currentReplicas != nil && *desiredReplicas == *currentReplicas { +// needUpdate = false +// } +// } +// } +// if needUpdate { +// return true +// } + +// needUpdate = true +// if desiredMaxSurge, ok, err := unstructured.NestedString(desiredObj.Object, MaxSurgePathSlice...); err == nil { +// if currentMaxSurge, ok2, err := unstructured.NestedString(clusterObj.Object, MaxSurgePathSlice...); err == nil && +// ok == ok2 { +// if desiredMaxSurge == currentMaxSurge { +// needUpdate = false +// } +// } +// } else if desiredMaxSurge, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxSurgePathSlice...); err == nil { +// if currentMaxSurge, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxSurgePathSlice...); err == nil && ok == ok2 { +// if desiredMaxSurge == currentMaxSurge { +// needUpdate = false +// } +// } +// } +// if needUpdate { +// return true +// } + +// needUpdate = true +// if desiredMaxUnavailable, ok, err := unstructured.NestedString(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { +// if currentMaxUnavailable, ok2, err := unstructured.NestedString(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && +// ok == ok2 { +// if desiredMaxUnavailable == currentMaxUnavailable { +// needUpdate = false +// } +// } +// } else if desiredMaxUnavailable, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { +// if currentMaxUnavailable, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && ok == ok2 { +// if desiredMaxUnavailable == currentMaxUnavailable { +// needUpdate = false +// } +// } +// } +// if needUpdate { +// return true +// } +// // If versions match and the version is sourced from the +// // generation field, a further check of metadata equivalency is +// // required. +// return strings.HasPrefix(targetVersion, generationPrefix) && !ObjectMetaObjEquivalent(desiredObj, clusterObj) +// } + +// // SortClusterVersions ASCII sorts the given cluster versions slice +// // based on cluster name. +// func SortClusterVersions(versions []fedcorev1a1.ClusterObjectVersion) { +// sort.Slice(versions, func(i, j int) bool { +// return versions[i].ClusterName < versions[j].ClusterName +// }) +// } + +// // PropagatedVersionStatusEquivalent returns true if both statuses are equal by +// // comparing Template and Override version, and their ClusterVersion slices; +// // false otherwise. 
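ConvertVersionMapToGenerationMap, commented out further below, keeps only versions it can interpret; a worked example of that contract, with made-up cluster names:

// Sketch of the conversion contract.
in := map[string]string{
	"cluster-a": "gen:4",   // generation-prefixed: parsed as 4
	"cluster-b": "rv:9001", // resourceVersion-prefixed: recorded as generation 0
	"cluster-c": "bogus",   // unrecognized prefix: dropped from the result
}
_ = ConvertVersionMapToGenerationMap(in)
// yields map[string]int64{"cluster-a": 4, "cluster-b": 0}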
+// func PropagatedVersionStatusEquivalent(pvs1, pvs2 *fedcorev1a1.PropagatedVersionStatus) bool { +// return pvs1.TemplateVersion == pvs2.TemplateVersion && +// pvs1.OverrideVersion == pvs2.OverrideVersion && +// reflect.DeepEqual(pvs1.ClusterVersions, pvs2.ClusterVersions) +// } + +// func ConvertVersionMapToGenerationMap(versionMap map[string]string) map[string]int64 { +// generationMap := make(map[string]int64, len(versionMap)) +// for key, version := range versionMap { +// if strings.HasPrefix(version, resourceVersionPrefix) { +// generationMap[key] = 0 +// continue +// } +// if !strings.HasPrefix(version, generationPrefix) { +// continue +// } + +// generationString := strings.TrimPrefix(version, generationPrefix) +// generation, err := strconv.ParseInt(generationString, 10, 64) +// if err != nil { +// continue +// } +// generationMap[key] = generation +// } +// return generationMap +// } diff --git a/pkg/controllers/util/rolloutplan.go b/pkg/controllers/util/rolloutplan.go index 7f777770..7e96c589 100644 --- a/pkg/controllers/util/rolloutplan.go +++ b/pkg/controllers/util/rolloutplan.go @@ -16,852 +16,852 @@ limitations under the License. package util -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - intstrutil "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/klog/v2" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" -) - -const ( - ReplicaPath = "/spec/replicas" - MaxSurgePath = "/spec/strategy/rollingUpdate/maxSurge" - MaxUnavailablePath = "/spec/strategy/rollingUpdate/maxUnavailable" - Nil = "nil" -) - -var ( - MaxSurgePathSlice = []string{ - common.SpecField, - common.StrategyField, - common.RollingUpdateField, - common.MaxSurgeField, - } - MaxUnavailablePathSlice = []string{ - common.SpecField, - common.StrategyField, - common.RollingUpdateField, - common.MaxUnavailableField, - } -) - -type RolloutPlan struct { - Replicas *int32 - MaxSurge *int32 - MaxUnavailable *int32 - OnlyPatchReplicas bool -} - -func (p RolloutPlan) String() string { - r, s, u := Nil, Nil, Nil - if p.Replicas != nil { - r = fmt.Sprintf("%d", *p.Replicas) - } - if p.MaxSurge != nil { - s = fmt.Sprintf("%d", *p.MaxSurge) - } - if p.MaxUnavailable != nil { - u = fmt.Sprintf("%d", *p.MaxUnavailable) - } - return fmt.Sprintf("%s,%s,%s,%t", r, s, u, p.OnlyPatchReplicas) -} - -func (p RolloutPlan) toOverrides() fedtypesv1a1.OverridePatches { - overrides := fedtypesv1a1.OverridePatches{} - if p.Replicas != nil { - overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: ReplicaPath, Value: *p.Replicas}) - } - if p.MaxSurge != nil { - overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: MaxSurgePath, Value: *p.MaxSurge}) - } - if p.MaxUnavailable != nil { - overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: MaxUnavailablePath, Value: *p.MaxUnavailable}) - } - return overrides -} - -func (p *RolloutPlan) correctFencepost(t *TargetInfo, defaultIsSurge bool) { - completed := t.UpdateCompleted() - isSurge := t.IsSurge() - flip := t.Flip(defaultIsSurge) - - if completed && !flip { - // If the new replica set is saturated, set maxSurge & maxUnavailable to the final value. 
- // If there are unavailable instances in the new replica set, they will be part of maxUnavailable - p.MaxSurge = nil - p.MaxUnavailable = nil - } else if *p.MaxSurge == 0 && *p.MaxUnavailable == 0 { - // Like deployment controller, we set one of them to one if both maxSurge & maxUnavailable is zero - var one int32 = 1 - if isSurge { - p.MaxSurge = &one - } else { - p.MaxUnavailable = &one - } - } -} - -type RolloutPlans map[string]*RolloutPlan - -func (r RolloutPlans) String() string { - var strs []string - for k, v := range r { - strs = append(strs, fmt.Sprintf("%s:%v", k, v)) - } - return strings.Join(strs, "; ") -} - -func (r RolloutPlans) GetRolloutOverrides(clusterName string) fedtypesv1a1.OverridePatches { - p, ok := r[clusterName] - if !ok { - return fedtypesv1a1.OverridePatches{} - } - return p.toOverrides() -} - -type Targets []*TargetInfo - -func (s Targets) CurrentReplicas() int32 { - var currentReplicas int32 - for _, t := range s { - currentReplicas += t.Status.Replicas - } - return currentReplicas -} - -func (s Targets) DesiredReplicas() int32 { - var desiredReplicas int32 - for _, t := range s { - desiredReplicas += t.DesiredReplicas - } - return desiredReplicas -} - -func (s Targets) AvailableReplicas() int32 { - var totalAvailable int32 - for _, t := range s { - totalAvailable += t.Status.AvailableReplicas - } - return totalAvailable -} - -func (s Targets) ActualReplicas() int32 { - var totalActual int32 - for _, t := range s { - totalActual += t.Status.ActualReplicas - } - return totalActual -} - -type TargetStatus struct { - Replicas int32 // dp.Spec.Replicas - ActualReplicas int32 // dp.Status.Replicas - AvailableReplicas int32 // dp.Status.AvailableReplicas - UpdatedReplicas int32 // latestreplicaset.kubeadmiral.io/replicas if it's up-to-date, else 0 - UpdatedAvailableReplicas int32 // latestreplicaset.kubeadmiral.io/available-replicas if it's up-to-date, else 0 - CurrentNewReplicas int32 // the replicas of new replicaset which belong to current deployment - CurrentNewAvailableReplicas int32 // the available replicas of new replicaset which belong to current deployment - Updated bool // whether pod template is up to date in current dp with which in fedDp - MaxSurge int32 // maxSurge in current dp - MaxUnavailable int32 // maxUnavailable in current dp -} - -type TargetInfo struct { - ClusterName string - Status TargetStatus - DesiredReplicas int32 -} - -func (t *TargetInfo) String() string { - return fmt.Sprintf("%s:%d->%d,%d/%d,%d/%d,%d/%d,%d,%d,%t", t.ClusterName, t.Status.Replicas, t.DesiredReplicas, - t.Status.UpdatedAvailableReplicas, t.Status.UpdatedReplicas, - t.Status.CurrentNewAvailableReplicas, t.Status.CurrentNewReplicas, - t.Status.AvailableReplicas, t.Status.ActualReplicas, - t.Status.MaxSurge, t.Status.MaxUnavailable, t.Status.Updated) -} - -func (t *TargetInfo) MaxSurge(maxSurge, leastSurge int32) (int32, int32) { - res := Int32Min(maxSurge+leastSurge, t.ReplicasToUpdate()) - if res < 0 { - res = 0 - } - more := res - leastSurge - // impossible in normal cases - // normalize to zero to get a more strict plan, try the best to correct the unexpected situation - if more < 0 { - more = 0 - } - if maxSurge < 0 && leastSurge > t.Status.MaxSurge && res > t.Status.MaxSurge { - res = t.Status.MaxSurge - } - return res, more -} - -func (t *TargetInfo) MaxUnavailable(maxUnavailable, leastUnavailable int32) (int32, int32) { - res := Int32Min(maxUnavailable+leastUnavailable, t.ReplicasToUpdatedAvailable()) - if res < 0 { - res = 0 - } - more := res - 
leastUnavailable - // impossible in normal cases - // normalize to zero to get a more strict plan, try the best to correct the unexpected situation - if more < 0 { - more = 0 - } - if maxUnavailable < 0 && leastUnavailable > t.Status.MaxUnavailable && res > t.Status.MaxUnavailable { - res = t.Status.MaxUnavailable - } - return res, more -} - -func (t *TargetInfo) MaxScaleOut(maxScaleOut, leastSurge int32) (int32, int32) { - res := Int32Min(maxScaleOut+leastSurge, t.DesiredReplicas-t.Status.Replicas) - if res < 0 { - res = 0 - } - more := res - leastSurge - if more < 0 { - more = 0 - } - return res, more -} - -func (t *TargetInfo) MaxScaleIn(maxScaleIn, leastUnavailable int32) (int32, int32) { - res := Int32Min(maxScaleIn+leastUnavailable, t.Status.Replicas-t.DesiredReplicas) - // impossible - if res > t.Status.Replicas { - res = t.Status.Replicas - } - if res < 0 { - res = 0 - } - more := res - leastUnavailable - if more < 0 { - more = 0 - } - return res, more -} - -func (t *TargetInfo) LeastSurge() int32 { - res := t.Status.ActualReplicas - t.Status.Replicas - if res < 0 { - res = 0 - } - if !t.DuringUpdating() { - return res - } - return Int32Max(res, Int32Min(t.Status.MaxSurge, res+t.ReplicasToUpdateCurrently())) -} - -func (t *TargetInfo) LeastUnavailable() int32 { - res := t.Status.Replicas - t.Status.AvailableReplicas - if res < 0 { - res = 0 - } - if !t.DuringUpdating() { - return res - } - return Int32Max(res, Int32Min(t.Status.MaxUnavailable, t.ReplicasToUpdatedAvailableCurrently())) -} - -func (t *TargetInfo) ReplicasToUpdate() int32 { - res := t.Status.Replicas - t.Status.UpdatedReplicas - if res < 0 { - res = 0 - } - return res -} - -func (t *TargetInfo) ReplicasToUpdatedAvailable() int32 { - res := t.Status.Replicas - t.Status.UpdatedAvailableReplicas - if res < 0 { - res = 0 - } - return res -} - -func (t *TargetInfo) ReplicasToUpdateCurrently() int32 { - res := t.Status.Replicas - t.Status.CurrentNewReplicas - if res < 0 { - res = 0 - } - return res -} - -func (t *TargetInfo) ReplicasToUpdatedAvailableCurrently() int32 { - res := t.Status.Replicas - t.Status.CurrentNewAvailableReplicas - if res < 0 { - res = 0 - } - return res -} - -func (t *TargetInfo) DuringUpdating() bool { - // todo: only return t.Status.CurrentNewReplicas < t.Status.Replicas after we get the real currentNewReplicas - if t.Status.CurrentNewReplicas < t.Status.Replicas { - return true - } - if t.Status.Updated && t.ReplicasToUpdate() > 0 { - return true - } - return false -} - -func (t *TargetInfo) UpdateCompleted() bool { - return t.ReplicasToUpdate() == 0 -} - -func (t *TargetInfo) IsSurge() bool { - return t.Status.MaxSurge != 0 && t.Status.MaxUnavailable == 0 -} - -func (t *TargetInfo) Flip(defaultIsSurge bool) bool { - // a temporary fix to avoid unexpected flipping - // todo: avoiding this nasty judgment by restricting the replicas changes to be used only for scaling - return t.IsSurge() && !defaultIsSurge && t.ReplicasToUpdatedAvailable() > 0 -} - -func (t *TargetInfo) SkipPlanForUpdate(maxSurge, maxUnavailable int32) bool { - return maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() && t.LeastSurge() <= 0 && - t.LeastUnavailable() <= 0 -} - -func (t *TargetInfo) SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable int32) bool { - if maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() { - if leastUnavailable > 0 { - return false - } - leastSurge := t.LeastSurge() - if t.DesiredReplicas < t.Status.Replicas { - 
leastSurge = 0 - } - if leastSurge > 0 { - return false - } - return true - } - return false -} - -func (t *TargetInfo) SkipPlanForScaleIn(maxUnavailable int32) bool { - return maxUnavailable <= 0 && t.LeastUnavailable() <= 0 -} - -func (t *TargetInfo) SkipPlanForScaleOut(maxSurge int32) bool { - return maxSurge <= 0 && t.LeastSurge() <= 0 -} - -func unstructuredObjToTargetInfo(clusterName string, unstructuredObj *unstructured.Unstructured, desiredReplicas int32, - desiredRevision string, typeConfig *fedcorev1a1.FederatedTypeConfig, -) (*TargetInfo, error) { - if unstructuredObj == nil { - return &TargetInfo{ - ClusterName: clusterName, - DesiredReplicas: desiredReplicas, - }, nil - } - - replicas, err := utilunstructured.GetInt64FromPath(unstructuredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) - if err != nil || replicas == nil { - return nil, errors.Errorf("failed to retrieve replicas, err: %v", err) - } - maxSurge, maxUnavailable, err := RetrieveFencepost( - unstructuredObj, - MaxSurgePathSlice, - MaxUnavailablePathSlice, - int32(*replicas), - ) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve fencepost") - } - revision, ok := unstructuredObj.GetAnnotations()[common.CurrentRevisionAnnotation] - if !ok { - return nil, errors.Errorf("failed to retrieve annotation %s", common.CurrentRevisionAnnotation) - } - // consider it has been updated as long as the template is updated. We don't wait for the refresh of - // latestreplicaset annotations since the latency due to asynchronous updates may bring some problems - updated := revision == desiredRevision - currentNewReplicas, currentNewAvailableReplicas, err := retrieveNewReplicaSetInfo(unstructuredObj) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve new replicaSet info") - } - - updatedReplicas, updatedAvailableReplicas := currentNewReplicas, currentNewAvailableReplicas - if !updated { - updatedReplicas, updatedAvailableReplicas = 0, 0 - } - - actualReplicasOption, err := utilunstructured.GetInt64FromPath( - unstructuredObj, - typeConfig.Spec.PathDefinition.ReplicasSpec, - nil, - ) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve actual replicas") - } - var actualReplicas int32 - if actualReplicasOption != nil { - actualReplicas = int32(*actualReplicasOption) - } - - availableReplicasOption, err := utilunstructured.GetInt64FromPath( - unstructuredObj, - typeConfig.Spec.PathDefinition.AvailableReplicasStatus, - nil, - ) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve actual available replicas") - } - var availableReplicas int32 - if availableReplicasOption != nil { - availableReplicas = int32(*availableReplicasOption) - } - - t := &TargetInfo{ - ClusterName: clusterName, - Status: TargetStatus{ - Replicas: int32(*replicas), - ActualReplicas: actualReplicas, - AvailableReplicas: availableReplicas, - UpdatedReplicas: updatedReplicas, - UpdatedAvailableReplicas: updatedAvailableReplicas, - CurrentNewReplicas: currentNewReplicas, - CurrentNewAvailableReplicas: currentNewAvailableReplicas, - Updated: updated, - MaxSurge: maxSurge, - MaxUnavailable: maxUnavailable, - }, - DesiredReplicas: desiredReplicas, - } - return t, nil -} - -type RolloutPlanner struct { - typeConfig *fedcorev1a1.FederatedTypeConfig - Key string - Targets Targets - MaxSurge int32 - MaxUnavailable int32 - Replicas int32 - Revision string -} - -func NewRolloutPlanner( - key string, - typeConfig *fedcorev1a1.FederatedTypeConfig, - federatedResource *unstructured.Unstructured, - replicas 
int32, -) (*RolloutPlanner, error) { - pathPrefix := []string{common.SpecField, common.TemplateField} - maxSurgePath := append(pathPrefix, MaxSurgePathSlice...) - maxUnavailablePath := append(pathPrefix, MaxUnavailablePathSlice...) - maxSurge, maxUnavailable, err := RetrieveFencepost(federatedResource, maxSurgePath, maxUnavailablePath, replicas) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve maxSurge or maxUnavailable from federated resource") - } - desiredRevision, ok := federatedResource.GetAnnotations()[common.CurrentRevisionAnnotation] - if !ok { - return nil, errors.Errorf( - "failed to retrieve annotation %s from federated resource", - common.CurrentRevisionAnnotation, - ) - } - return &RolloutPlanner{ - typeConfig: typeConfig, - Key: key, - MaxSurge: maxSurge, - MaxUnavailable: maxUnavailable, - Replicas: replicas, - Revision: desiredRevision, - }, nil -} - -func (p *RolloutPlanner) RegisterTarget( - clusterName string, - targetObj *unstructured.Unstructured, - desiredReplicas int32, -) error { - t, err := unstructuredObjToTargetInfo(clusterName, targetObj, desiredReplicas, p.Revision, p.typeConfig) - if err != nil { - return err - } - p.Targets = append(p.Targets, t) - return nil -} - -func (p *RolloutPlanner) IsScalingEvent() bool { - _, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets) - // create / scale out / scale in - if len(targetsToScaleOut) != 0 && len(targetsToScaleIn) != 0 { - return false - } - if len(targetsToScaleOut) == 0 && len(targetsToScaleIn) == 0 { - return false - } - for _, t := range p.Targets { - if !t.UpdateCompleted() { - return false - } - if t.Flip(p.IsSurge()) { - return false - } - } - return true -} - -func (p *RolloutPlanner) PlanScale() RolloutPlans { - plans := make(map[string]*RolloutPlan) - for _, t := range p.Targets { - plans[t.ClusterName] = &RolloutPlan{} - } - return plans -} - -func (p *RolloutPlanner) String() string { - var ts []string - for _, t := range p.Targets { - ts = append(ts, fmt.Sprintf("%v", t)) - } - return fmt.Sprintf("%s[%d,%d,%d,%s]: %v", - p.Key, p.Replicas, p.MaxSurge, p.MaxUnavailable, p.Revision, strings.Join(ts, "; ")) -} - -func (p *RolloutPlanner) RemainingMaxSurge() int32 { - // maxSurge := p.Replicas + p.MaxSurge - p.Targets.ActualReplicas() - // maxSurge := p.MaxSurge - (p.Targets.ActualReplicas() - p.Replicas) - var replicas, occupied int32 - for _, t := range p.Targets { - replicas += t.Status.Replicas - occupied += t.LeastSurge() - } - return p.MaxSurge - (replicas - p.Replicas) - occupied -} - -func (p *RolloutPlanner) RemainingMaxUnavailable() int32 { - // maxUnavailable := p.Targets.AvailableReplicas() - (p.Replicas - p.MaxUnavailable) - // maxUnavailable := p.MaxUnavailable - (p.Replicas - p.Targets.AvailableReplicas()) - var replicas, occupied int32 - for _, t := range p.Targets { - replicas += t.Status.Replicas - occupied += t.LeastUnavailable() - } - return p.MaxUnavailable - (p.Replicas - replicas) - occupied -} - -func (p *RolloutPlanner) IsSurge() bool { - return p.MaxSurge != 0 && p.MaxUnavailable == 0 -} - -// ComputeRolloutPlans compute maxUnavailable, maxSurge, replicas during rollout process. It returns a map that -// contains all the targets which are planned according to current status. Nil in a plan means the corresponding field -// won't be overridden by the rollout plan and should be set with the original value. If there's no plan for a target, -// it means "don't rollout it, it should wait for it's turn". 
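A sketch of how a sync loop might consume the result, matching the nil/missing semantics just described; memberClusters and applyPatches are assumed helpers, not part of this file:

plans := planner.Plan()
for _, cluster := range memberClusters {
	if _, ok := plans[cluster]; !ok {
		// No plan: this cluster waits for its turn and keeps its current spec.
		continue
	}
	// Nil fields inside a plan mean "keep the original value";
	// GetRolloutOverrides emits patches only for fields the plan sets.
	applyPatches(cluster, plans.GetRolloutOverrides(cluster))
}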
-func (p *RolloutPlanner) Plan() RolloutPlans {
-	targetsToUpdate, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets)
-	plans := make(map[string]*RolloutPlan)
-
-	if p.IsScalingEvent() {
-		return p.PlanScale()
-	}
-
-	// the remaining maxSurge & maxUnavailable that can be dispatched to deployments. If there are clusters that are
-	// not ready, or that we failed to get the deployment from, the maxSurge/maxUnavailable will be increased/decreased
-	maxSurge, maxUnavailable := p.RemainingMaxSurge(), p.RemainingMaxUnavailable()
-
-	// execution sequence (try to upgrade before scaling out, and scale in before upgrading):
-	// 1. upgrade targets waiting to be scaled out
-	// 2. scale in targets waiting to be scaled in
-	// 3. upgrade targets that only need to be upgraded
-	// 4. scale out targets waiting to be scaled out
-	// 5. upgrade targets waiting to be scaled in
-	for _, t := range targetsToScaleOut {
-		if t.SkipPlanForUpdate(maxSurge, maxUnavailable) {
-			continue
-		}
-		s, sm := t.MaxSurge(maxSurge, t.LeastSurge())
-		u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable())
-		maxSurge -= sm
-		maxUnavailable -= um
-		r := t.Status.Replicas
-		plan := &RolloutPlan{Replicas: &r}
-		plan.MaxSurge = &s
-		plan.MaxUnavailable = &u
-		plan.correctFencepost(t, p.IsSurge())
-		plans[t.ClusterName] = plan
-	}
-
-	for _, t := range targetsToScaleIn {
-		if t.SkipPlanForScaleIn(maxUnavailable) {
-			continue
-		}
-		// we tend to scale in those that are already unavailable
-		leastUnavailable := t.LeastUnavailable()
-		if t.DuringUpdating() {
-			// if it's during updating (for example, the maxUnavailable was enough for the scale in with an update coming next,
-			// so we set the replicas and maxUnavailable; but a fed weight adjustment followed, so we have to scale in again
-			// even though it's being updated), scaling will be performed proportionally and may not cover the
-			// unavailable instances as expected.
- leastUnavailable = 0 - } - scale, more := t.MaxScaleIn(maxUnavailable, leastUnavailable) - maxUnavailable -= more - plan := &RolloutPlan{OnlyPatchReplicas: true} - r := t.Status.Replicas - scale - plan.Replicas = &r - plans[t.ClusterName] = plan - } - - for _, t := range targetsToUpdate { - if t.SkipPlanForUpdate(maxSurge, maxUnavailable) { - continue - } - s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) - u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable()) - maxSurge -= sm - maxUnavailable -= um - plan := &RolloutPlan{} - plan.MaxSurge = &s - plan.MaxUnavailable = &u - plan.correctFencepost(t, p.IsSurge()) - plans[t.ClusterName] = plan - } - - for _, t := range targetsToScaleOut { - if t.SkipPlanForScaleOut(maxSurge) { - continue - } - // make sure new rs exists to avoid too much unnecessary work - if !t.Status.Updated && t.Status.Replicas != 0 { - continue - } - leastSurge := t.LeastSurge() - if t.DuringUpdating() { - leastSurge = 0 - } - scale, more := t.MaxScaleOut(maxSurge, leastSurge) - maxSurge -= more - plan, ok := plans[t.ClusterName] - if !ok || plan == nil { - plan = &RolloutPlan{} - } - r := t.Status.Replicas + scale - plan.Replicas = &r - plans[t.ClusterName] = plan - } - - for _, t := range targetsToScaleIn { - plan, ok := plans[t.ClusterName] - if !ok || plan == nil { - r := t.Status.Replicas - plan = &RolloutPlan{Replicas: &r} - } - // we have already scale in some unavailable instances in the second step, exclude them - leastUnavailable := t.LeastUnavailable() - if !t.DuringUpdating() { - leastUnavailable -= t.Status.Replicas - *plan.Replicas - if leastUnavailable < 0 { - leastUnavailable = 0 - } - } - if t.SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable) { - continue - } - - plan.OnlyPatchReplicas = false - s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) - u, um := t.MaxUnavailable(maxUnavailable, leastUnavailable) - maxSurge -= sm - maxUnavailable -= um - plan.MaxSurge = &s - plan.MaxUnavailable = &u - plan.correctFencepost(t, p.IsSurge()) - plans[t.ClusterName] = plan - } - if err := validatePlans(p, plans); err != nil { - klog.Errorf("Failed to generate rollout plan for %s: %v. 
Current status: %s", p.Key, err, p) - return RolloutPlans{} - } - return plans -} - -func sortTargets(targets []*TargetInfo) ([]*TargetInfo, []*TargetInfo, []*TargetInfo) { - // sort the list to first update the targets that are already in update process - sort.Slice(targets, func(i, j int) bool { - return targets[i].ClusterName < targets[j].ClusterName - }) - var toUpdate, toScaleOut, toScaleIn []*TargetInfo - for _, t := range targets { - change := t.DesiredReplicas - t.Status.Replicas - switch { - case change < 0: - toScaleIn = append(toScaleIn, t) - case change > 0: - toScaleOut = append(toScaleOut, t) - default: - toUpdate = append(toUpdate, t) - } - } - return toUpdate, toScaleOut, toScaleIn -} - -func Int32Min(a, b int32) int32 { - if b < a { - return b - } - return a -} - -func Int32Max(a, b int32) int32 { - if b > a { - return b - } - return a -} - -func resolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { - surge, err := intstrutil.GetValueFromIntOrPercent( - intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), - int(desired), - true, - ) - if err != nil { - return 0, 0, err - } - unavailable, err := intstrutil.GetValueFromIntOrPercent( - intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), - int(desired), - false, - ) - if err != nil { - return 0, 0, err - } - - if surge == 0 && unavailable == 0 { - // Validation should never allow the user to explicitly use zero values for both maxSurge - // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. - // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the - // theory that surge might not work due to quota. - unavailable = 1 - } - - return int32(surge), int32(unavailable), nil -} - -func RetrieveFencepost(unstructuredObj *unstructured.Unstructured, maxSurgePath []string, maxUnavailablePath []string, - replicas int32, -) (int32, int32, error) { - var maxSurge, maxUnavailable *intstrutil.IntOrString - if ms, ok, err := unstructured.NestedString(unstructuredObj.Object, maxSurgePath...); ok && err == nil { - maxSurge = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: ms} - } else { - if ms, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxSurgePath...); ok && err2 == nil { - maxSurge = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(ms)} - } else { - klog.V(4).Infof("Failed to retrieve maxSurge from %s/%s: %v, %v", - unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2) - } - } - if mu, ok, err := unstructured.NestedString(unstructuredObj.Object, maxUnavailablePath...); ok && err == nil { - maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: mu} - } else { - if mu, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxUnavailablePath...); ok && err2 == nil { - maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(mu)} - } else { - klog.V(4).Infof("Failed to retrieve maxUnavailable from %s/%s: %v, %v", - unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2) - } - } - - ms, mu, err := resolveFenceposts(maxSurge, maxUnavailable, replicas) - if err != nil { - return 0, 0, err - } - if ms < 0 { - ms = 0 - } - if mu < 0 { - mu = 0 - } - return ms, mu, nil -} - -func retrieveNewReplicaSetInfo(unstructuredObj *unstructured.Unstructured) (int32, int32, error) { - ann, ok := unstructuredObj.GetAnnotations()[LatestReplicasetReplicasAnnotation] - if !ok || ann == "" { - return 0, 0, 
errors.Errorf("missing annotation %s", LatestReplicasetReplicasAnnotation) - } - replicas, err := strconv.ParseInt(ann, 10, 32) - if err != nil { - return 0, 0, err - } - ann, ok = unstructuredObj.GetAnnotations()[LatestReplicasetAvailableReplicasAnnotation] - if !ok || ann == "" { - return 0, 0, errors.Errorf( - "missing annotation %s", LatestReplicasetAvailableReplicasAnnotation) - } - availableReplicas, err := strconv.ParseInt(ann, 10, 32) - if err != nil { - return 0, 0, err - } - // todo: make sure the latestreplicaset annotations describe the current pod template of deployment - // a simple way to tell if the latestreplicaset annotations is up to date with current deployment. - lastRsName, lastRsNameExists := unstructuredObj.GetAnnotations()[common.LastReplicasetName] - rsName, rsNameExists := unstructuredObj.GetAnnotations()[LatestReplicasetNameAnnotation] - if !rsNameExists { - return 0, 0, errors.Errorf("missing annotation %s", LatestReplicasetNameAnnotation) - } - rsNameOutdated := lastRsNameExists && rsNameExists && lastRsName == rsName - if rsNameOutdated { - // paused=true may also result in this situation - replicas, availableReplicas = 0, 0 - } - return int32(replicas), int32(availableReplicas), nil -} - -func validatePlans(p *RolloutPlanner, plans RolloutPlans) error { - var planned, desired, current, maxUnavailable int32 - for _, t := range p.Targets { - desired += t.DesiredReplicas - cluster := t.ClusterName - r := t.Status.Replicas - current += r - if p, ok := plans[cluster]; ok { - if p == nil { - return errors.Errorf("invalid plan for %s: %v", cluster, p) - } - if p.Replicas != nil { - r = *p.Replicas - } else { - r = t.DesiredReplicas - } - if p.MaxUnavailable != nil { - if p.MaxSurge == nil || *p.MaxSurge != 0 || *p.MaxUnavailable != 1 { - maxUnavailable += *p.MaxUnavailable - } - } - } - planned += r - } - if p.Replicas-desired > p.MaxUnavailable { - return errors.Errorf("desired replicas deviates too much from the initial replicas, maybe some " + - "clusters are not ready") - } - l, h := desired, current - if desired > current { - l, h = current, desired - } - if l-planned > p.MaxUnavailable || planned-h > p.MaxSurge { - return errors.Errorf("invalid plan: %v", plans) - } - return nil -} +// import ( +// "fmt" +// "sort" +// "strconv" +// "strings" + +// "github.com/pkg/errors" +// v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +// intstrutil "k8s.io/apimachinery/pkg/util/intstr" +// "k8s.io/klog/v2" + +// fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" +// "github.com/kubewharf/kubeadmiral/pkg/controllers/common" +// utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" +// ) + +// const ( +// ReplicaPath = "/spec/replicas" +// MaxSurgePath = "/spec/strategy/rollingUpdate/maxSurge" +// MaxUnavailablePath = "/spec/strategy/rollingUpdate/maxUnavailable" +// Nil = "nil" +// ) + +// var ( +// MaxSurgePathSlice = []string{ +// common.SpecField, +// common.StrategyField, +// common.RollingUpdateField, +// common.MaxSurgeField, +// } +// MaxUnavailablePathSlice = []string{ +// common.SpecField, +// common.StrategyField, +// common.RollingUpdateField, +// common.MaxUnavailableField, +// } +// ) + +// type RolloutPlan struct { +// Replicas *int64 +// MaxSurge *int64 +// MaxUnavailable *int64 +// OnlyPatchReplicas bool +// } + +// func (p RolloutPlan) String() string { +// r, s, u := Nil, Nil, Nil +// if p.Replicas != nil { +// r = fmt.Sprintf("%d", 
*p.Replicas)
+// 	}
+// 	if p.MaxSurge != nil {
+// 		s = fmt.Sprintf("%d", *p.MaxSurge)
+// 	}
+// 	if p.MaxUnavailable != nil {
+// 		u = fmt.Sprintf("%d", *p.MaxUnavailable)
+// 	}
+// 	return fmt.Sprintf("%s,%s,%s,%t", r, s, u, p.OnlyPatchReplicas)
+// }
+
+// func (p RolloutPlan) toOverrides() fedcorev1a1.OverridePatches {
+// 	overrides := fedcorev1a1.OverridePatches{}
+// 	if p.Replicas != nil {
+// 		// OverridePatch.Value carries raw JSON here; encode each integer as its JSON text.
+// 		overrides = append(overrides, fedcorev1a1.OverridePatch{Path: ReplicaPath, Value: v1.JSON{Raw: []byte(strconv.FormatInt(*p.Replicas, 10))}})
+// 	}
+// 	if p.MaxSurge != nil {
+// 		overrides = append(overrides, fedcorev1a1.OverridePatch{Path: MaxSurgePath, Value: v1.JSON{Raw: []byte(strconv.FormatInt(*p.MaxSurge, 10))}})
+// 	}
+// 	if p.MaxUnavailable != nil {
+// 		overrides = append(overrides, fedcorev1a1.OverridePatch{Path: MaxUnavailablePath, Value: v1.JSON{Raw: []byte(strconv.FormatInt(*p.MaxUnavailable, 10))}})
+// 	}
+// 	return overrides
+// }
+
+// func (p *RolloutPlan) correctFencepost(t *TargetInfo, defaultIsSurge bool) {
+// 	completed := t.UpdateCompleted()
+// 	isSurge := t.IsSurge()
+// 	flip := t.Flip(defaultIsSurge)
+
+// 	if completed && !flip {
+// 		// If the new replica set is saturated, set maxSurge & maxUnavailable to the final value.
+// 		// If there are unavailable instances in the new replica set, they will be part of maxUnavailable
+// 		p.MaxSurge = nil
+// 		p.MaxUnavailable = nil
+// 	} else if *p.MaxSurge == 0 && *p.MaxUnavailable == 0 {
+// 		// Like the deployment controller, we set one of them to one if both maxSurge & maxUnavailable are zero
+// 		var one int64 = 1 // int64 to match the pointer fields above
+// 		if isSurge {
+// 			p.MaxSurge = &one
+// 		} else {
+// 			p.MaxUnavailable = &one
+// 		}
+// 	}
+// }
+
+// type RolloutPlans map[string]*RolloutPlan
+
+// func (r RolloutPlans) String() string {
+// 	var strs []string
+// 	for k, v := range r {
+// 		strs = append(strs, fmt.Sprintf("%s:%v", k, v))
+// 	}
+// 	return strings.Join(strs, "; ")
+// }
+
+// func (r RolloutPlans) GetRolloutOverrides(clusterName string) fedcorev1a1.OverridePatches {
+// 	p, ok := r[clusterName]
+// 	if !ok {
+// 		return fedcorev1a1.OverridePatches{}
+// 	}
+// 	return p.toOverrides()
+// }
+
+// type Targets []*TargetInfo
+
+// func (s Targets) CurrentReplicas() int32 {
+// 	var currentReplicas int32
+// 	for _, t := range s {
+// 		currentReplicas += t.Status.Replicas
+// 	}
+// 	return currentReplicas
+// }
+
+// func (s Targets) DesiredReplicas() int32 {
+// 	var desiredReplicas int32
+// 	for _, t := range s {
+// 		desiredReplicas += t.DesiredReplicas
+// 	}
+// 	return desiredReplicas
+// }
+
+// func (s Targets) AvailableReplicas() int32 {
+// 	var totalAvailable int32
+// 	for _, t := range s {
+// 		totalAvailable += t.Status.AvailableReplicas
+// 	}
+// 	return totalAvailable
+// }
+
+// func (s Targets) ActualReplicas() int32 {
+// 	var totalActual int32
+// 	for _, t := range s {
+// 		totalActual += t.Status.ActualReplicas
+// 	}
+// 	return totalActual
+// }
+
+// type TargetStatus struct {
+// 	Replicas                    int32 // dp.Spec.Replicas
+// 	ActualReplicas              int32 // dp.Status.Replicas
+// 	AvailableReplicas           int32 // dp.Status.AvailableReplicas
+// 	UpdatedReplicas             int32 // latestreplicaset.kubeadmiral.io/replicas if it's up-to-date, else 0
+// 	UpdatedAvailableReplicas    int32 // latestreplicaset.kubeadmiral.io/available-replicas if it's up-to-date, else 0
+// 	CurrentNewReplicas          int32 // the replicas of the new replicaset which belong to the current deployment
+// 	CurrentNewAvailableReplicas int32 // the available replicas of the new replicaset which belong to the current deployment
+// 	Updated                     bool  // whether the pod template in the current dp is up to date with the one in fedDp
+// 	MaxSurge                    int32 // maxSurge in current dp
+//
MaxUnavailable int32 // maxUnavailable in current dp +// } + +// type TargetInfo struct { +// ClusterName string +// Status TargetStatus +// DesiredReplicas int32 +// } + +// func (t *TargetInfo) String() string { +// return fmt.Sprintf("%s:%d->%d,%d/%d,%d/%d,%d/%d,%d,%d,%t", t.ClusterName, t.Status.Replicas, t.DesiredReplicas, +// t.Status.UpdatedAvailableReplicas, t.Status.UpdatedReplicas, +// t.Status.CurrentNewAvailableReplicas, t.Status.CurrentNewReplicas, +// t.Status.AvailableReplicas, t.Status.ActualReplicas, +// t.Status.MaxSurge, t.Status.MaxUnavailable, t.Status.Updated) +// } + +// func (t *TargetInfo) MaxSurge(maxSurge, leastSurge int32) (int32, int32) { +// res := Int32Min(maxSurge+leastSurge, t.ReplicasToUpdate()) +// if res < 0 { +// res = 0 +// } +// more := res - leastSurge +// // impossible in normal cases +// // normalize to zero to get a more strict plan, try the best to correct the unexpected situation +// if more < 0 { +// more = 0 +// } +// if maxSurge < 0 && leastSurge > t.Status.MaxSurge && res > t.Status.MaxSurge { +// res = t.Status.MaxSurge +// } +// return res, more +// } + +// func (t *TargetInfo) MaxUnavailable(maxUnavailable, leastUnavailable int32) (int32, int32) { +// res := Int32Min(maxUnavailable+leastUnavailable, t.ReplicasToUpdatedAvailable()) +// if res < 0 { +// res = 0 +// } +// more := res - leastUnavailable +// // impossible in normal cases +// // normalize to zero to get a more strict plan, try the best to correct the unexpected situation +// if more < 0 { +// more = 0 +// } +// if maxUnavailable < 0 && leastUnavailable > t.Status.MaxUnavailable && res > t.Status.MaxUnavailable { +// res = t.Status.MaxUnavailable +// } +// return res, more +// } + +// func (t *TargetInfo) MaxScaleOut(maxScaleOut, leastSurge int32) (int32, int32) { +// res := Int32Min(maxScaleOut+leastSurge, t.DesiredReplicas-t.Status.Replicas) +// if res < 0 { +// res = 0 +// } +// more := res - leastSurge +// if more < 0 { +// more = 0 +// } +// return res, more +// } + +// func (t *TargetInfo) MaxScaleIn(maxScaleIn, leastUnavailable int32) (int32, int32) { +// res := Int32Min(maxScaleIn+leastUnavailable, t.Status.Replicas-t.DesiredReplicas) +// // impossible +// if res > t.Status.Replicas { +// res = t.Status.Replicas +// } +// if res < 0 { +// res = 0 +// } +// more := res - leastUnavailable +// if more < 0 { +// more = 0 +// } +// return res, more +// } + +// func (t *TargetInfo) LeastSurge() int32 { +// res := t.Status.ActualReplicas - t.Status.Replicas +// if res < 0 { +// res = 0 +// } +// if !t.DuringUpdating() { +// return res +// } +// return Int32Max(res, Int32Min(t.Status.MaxSurge, res+t.ReplicasToUpdateCurrently())) +// } + +// func (t *TargetInfo) LeastUnavailable() int32 { +// res := t.Status.Replicas - t.Status.AvailableReplicas +// if res < 0 { +// res = 0 +// } +// if !t.DuringUpdating() { +// return res +// } +// return Int32Max(res, Int32Min(t.Status.MaxUnavailable, t.ReplicasToUpdatedAvailableCurrently())) +// } + +// func (t *TargetInfo) ReplicasToUpdate() int32 { +// res := t.Status.Replicas - t.Status.UpdatedReplicas +// if res < 0 { +// res = 0 +// } +// return res +// } + +// func (t *TargetInfo) ReplicasToUpdatedAvailable() int32 { +// res := t.Status.Replicas - t.Status.UpdatedAvailableReplicas +// if res < 0 { +// res = 0 +// } +// return res +// } + +// func (t *TargetInfo) ReplicasToUpdateCurrently() int32 { +// res := t.Status.Replicas - t.Status.CurrentNewReplicas +// if res < 0 { +// res = 0 +// } +// return res +// } + +// func (t *TargetInfo) 
ReplicasToUpdatedAvailableCurrently() int32 { +// res := t.Status.Replicas - t.Status.CurrentNewAvailableReplicas +// if res < 0 { +// res = 0 +// } +// return res +// } + +// func (t *TargetInfo) DuringUpdating() bool { +// // todo: only return t.Status.CurrentNewReplicas < t.Status.Replicas after we get the real currentNewReplicas +// if t.Status.CurrentNewReplicas < t.Status.Replicas { +// return true +// } +// if t.Status.Updated && t.ReplicasToUpdate() > 0 { +// return true +// } +// return false +// } + +// func (t *TargetInfo) UpdateCompleted() bool { +// return t.ReplicasToUpdate() == 0 +// } + +// func (t *TargetInfo) IsSurge() bool { +// return t.Status.MaxSurge != 0 && t.Status.MaxUnavailable == 0 +// } + +// func (t *TargetInfo) Flip(defaultIsSurge bool) bool { +// // a temporary fix to avoid unexpected flipping +// // todo: avoiding this nasty judgment by restricting the replicas changes to be used only for scaling +// return t.IsSurge() && !defaultIsSurge && t.ReplicasToUpdatedAvailable() > 0 +// } + +// func (t *TargetInfo) SkipPlanForUpdate(maxSurge, maxUnavailable int32) bool { +// return maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() && t.LeastSurge() <= 0 && +// t.LeastUnavailable() <= 0 +// } + +// func (t *TargetInfo) SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable int32) bool { +// if maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() { +// if leastUnavailable > 0 { +// return false +// } +// leastSurge := t.LeastSurge() +// if t.DesiredReplicas < t.Status.Replicas { +// leastSurge = 0 +// } +// if leastSurge > 0 { +// return false +// } +// return true +// } +// return false +// } + +// func (t *TargetInfo) SkipPlanForScaleIn(maxUnavailable int32) bool { +// return maxUnavailable <= 0 && t.LeastUnavailable() <= 0 +// } + +// func (t *TargetInfo) SkipPlanForScaleOut(maxSurge int32) bool { +// return maxSurge <= 0 && t.LeastSurge() <= 0 +// } + +// func unstructuredObjToTargetInfo(clusterName string, unstructuredObj *unstructured.Unstructured, desiredReplicas int32, +// desiredRevision string, typeConfig *fedcorev1a1.FederatedTypeConfig, +// ) (*TargetInfo, error) { +// if unstructuredObj == nil { +// return &TargetInfo{ +// ClusterName: clusterName, +// DesiredReplicas: desiredReplicas, +// }, nil +// } + +// replicas, err := utilunstructured.GetInt64FromPath(unstructuredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) +// if err != nil || replicas == nil { +// return nil, errors.Errorf("failed to retrieve replicas, err: %v", err) +// } +// maxSurge, maxUnavailable, err := RetrieveFencepost( +// unstructuredObj, +// MaxSurgePathSlice, +// MaxUnavailablePathSlice, +// int32(*replicas), +// ) +// if err != nil { +// return nil, errors.Wrap(err, "failed to retrieve fencepost") +// } +// revision, ok := unstructuredObj.GetAnnotations()[common.CurrentRevisionAnnotation] +// if !ok { +// return nil, errors.Errorf("failed to retrieve annotation %s", common.CurrentRevisionAnnotation) +// } +// // consider it has been updated as long as the template is updated. 
We don't wait for the refresh of +// // latestreplicaset annotations since the latency due to asynchronous updates may bring some problems +// updated := revision == desiredRevision +// currentNewReplicas, currentNewAvailableReplicas, err := retrieveNewReplicaSetInfo(unstructuredObj) +// if err != nil { +// return nil, errors.Wrap(err, "failed to retrieve new replicaSet info") +// } + +// updatedReplicas, updatedAvailableReplicas := currentNewReplicas, currentNewAvailableReplicas +// if !updated { +// updatedReplicas, updatedAvailableReplicas = 0, 0 +// } + +// actualReplicasOption, err := utilunstructured.GetInt64FromPath( +// unstructuredObj, +// typeConfig.Spec.PathDefinition.ReplicasSpec, +// nil, +// ) +// if err != nil { +// return nil, errors.Wrap(err, "failed to retrieve actual replicas") +// } +// var actualReplicas int32 +// if actualReplicasOption != nil { +// actualReplicas = int32(*actualReplicasOption) +// } + +// availableReplicasOption, err := utilunstructured.GetInt64FromPath( +// unstructuredObj, +// typeConfig.Spec.PathDefinition.AvailableReplicasStatus, +// nil, +// ) +// if err != nil { +// return nil, errors.Wrap(err, "failed to retrieve actual available replicas") +// } +// var availableReplicas int32 +// if availableReplicasOption != nil { +// availableReplicas = int32(*availableReplicasOption) +// } + +// t := &TargetInfo{ +// ClusterName: clusterName, +// Status: TargetStatus{ +// Replicas: int32(*replicas), +// ActualReplicas: actualReplicas, +// AvailableReplicas: availableReplicas, +// UpdatedReplicas: updatedReplicas, +// UpdatedAvailableReplicas: updatedAvailableReplicas, +// CurrentNewReplicas: currentNewReplicas, +// CurrentNewAvailableReplicas: currentNewAvailableReplicas, +// Updated: updated, +// MaxSurge: maxSurge, +// MaxUnavailable: maxUnavailable, +// }, +// DesiredReplicas: desiredReplicas, +// } +// return t, nil +// } + +// type RolloutPlanner struct { +// typeConfig *fedcorev1a1.FederatedTypeConfig +// Key string +// Targets Targets +// MaxSurge int32 +// MaxUnavailable int32 +// Replicas int32 +// Revision string +// } + +// func NewRolloutPlanner( +// key string, +// typeConfig *fedcorev1a1.FederatedTypeConfig, +// federatedResource *unstructured.Unstructured, +// replicas int32, +// ) (*RolloutPlanner, error) { +// pathPrefix := []string{common.SpecField, common.TemplateField} +// maxSurgePath := append(pathPrefix, MaxSurgePathSlice...) +// maxUnavailablePath := append(pathPrefix, MaxUnavailablePathSlice...) 
+// maxSurge, maxUnavailable, err := RetrieveFencepost(federatedResource, maxSurgePath, maxUnavailablePath, replicas) +// if err != nil { +// return nil, errors.Wrap(err, "failed to retrieve maxSurge or maxUnavailable from federated resource") +// } +// desiredRevision, ok := federatedResource.GetAnnotations()[common.CurrentRevisionAnnotation] +// if !ok { +// return nil, errors.Errorf( +// "failed to retrieve annotation %s from federated resource", +// common.CurrentRevisionAnnotation, +// ) +// } +// return &RolloutPlanner{ +// typeConfig: typeConfig, +// Key: key, +// MaxSurge: maxSurge, +// MaxUnavailable: maxUnavailable, +// Replicas: replicas, +// Revision: desiredRevision, +// }, nil +// } + +// func (p *RolloutPlanner) RegisterTarget( +// clusterName string, +// targetObj *unstructured.Unstructured, +// desiredReplicas int32, +// ) error { +// t, err := unstructuredObjToTargetInfo(clusterName, targetObj, desiredReplicas, p.Revision, p.typeConfig) +// if err != nil { +// return err +// } +// p.Targets = append(p.Targets, t) +// return nil +// } + +// func (p *RolloutPlanner) IsScalingEvent() bool { +// _, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets) +// // create / scale out / scale in +// if len(targetsToScaleOut) != 0 && len(targetsToScaleIn) != 0 { +// return false +// } +// if len(targetsToScaleOut) == 0 && len(targetsToScaleIn) == 0 { +// return false +// } +// for _, t := range p.Targets { +// if !t.UpdateCompleted() { +// return false +// } +// if t.Flip(p.IsSurge()) { +// return false +// } +// } +// return true +// } + +// func (p *RolloutPlanner) PlanScale() RolloutPlans { +// plans := make(map[string]*RolloutPlan) +// for _, t := range p.Targets { +// plans[t.ClusterName] = &RolloutPlan{} +// } +// return plans +// } + +// func (p *RolloutPlanner) String() string { +// var ts []string +// for _, t := range p.Targets { +// ts = append(ts, fmt.Sprintf("%v", t)) +// } +// return fmt.Sprintf("%s[%d,%d,%d,%s]: %v", +// p.Key, p.Replicas, p.MaxSurge, p.MaxUnavailable, p.Revision, strings.Join(ts, "; ")) +// } + +// func (p *RolloutPlanner) RemainingMaxSurge() int32 { +// // maxSurge := p.Replicas + p.MaxSurge - p.Targets.ActualReplicas() +// // maxSurge := p.MaxSurge - (p.Targets.ActualReplicas() - p.Replicas) +// var replicas, occupied int32 +// for _, t := range p.Targets { +// replicas += t.Status.Replicas +// occupied += t.LeastSurge() +// } +// return p.MaxSurge - (replicas - p.Replicas) - occupied +// } + +// func (p *RolloutPlanner) RemainingMaxUnavailable() int32 { +// // maxUnavailable := p.Targets.AvailableReplicas() - (p.Replicas - p.MaxUnavailable) +// // maxUnavailable := p.MaxUnavailable - (p.Replicas - p.Targets.AvailableReplicas()) +// var replicas, occupied int32 +// for _, t := range p.Targets { +// replicas += t.Status.Replicas +// occupied += t.LeastUnavailable() +// } +// return p.MaxUnavailable - (p.Replicas - replicas) - occupied +// } + +// func (p *RolloutPlanner) IsSurge() bool { +// return p.MaxSurge != 0 && p.MaxUnavailable == 0 +// } + +// // ComputeRolloutPlans compute maxUnavailable, maxSurge, replicas during rollout process. It returns a map that +// // contains all the targets which are planned according to current status. Nil in a plan means the corresponding field +// // won't be overridden by the rollout plan and should be set with the original value. If there's no plan for a target, +// // it means "don't rollout it, it should wait for it's turn". 
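To ground the budget arithmetic in RemainingMaxSurge above with assumed numbers: with p.Replicas = 10 and p.MaxSurge = 2, if the member targets currently run replicas = 11 in total and already occupy occupied = 1 surge slot, then RemainingMaxSurge = 2 - (11 - 10) - 1 = 0, so no surge budget remains to hand out. RemainingMaxUnavailable is symmetric, measuring slack against the availability floor p.Replicas - p.MaxUnavailable.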
+// func (p *RolloutPlanner) Plan() RolloutPlans {
+// targetsToUpdate, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets)
+// plans := make(map[string]*RolloutPlan)
+
+// if p.IsScalingEvent() {
+// return p.PlanScale()
+// }
+
+// // the remaining maxSurge & maxUnavailable that can be dispatched to deployments. If there are clusters that are
+// // not ready, or that we failed to get the deployment from, the maxSurge/maxUnavailable will be increased/decreased
+// maxSurge, maxUnavailable := p.RemainingMaxSurge(), p.RemainingMaxUnavailable()
+
+// // execution sequence (try to upgrade before scale out and scale in before upgrade):
+// // 1. upgrade targets waiting to be scaled out
+// // 2. scale in targets waiting to be scaled in
+// // 3. upgrade targets that only need to be upgraded
+// // 4. scale out targets waiting to be scaled out
+// // 5. upgrade targets waiting to be scaled in
+// for _, t := range targetsToScaleOut {
+// if t.SkipPlanForUpdate(maxSurge, maxUnavailable) {
+// continue
+// }
+// s, sm := t.MaxSurge(maxSurge, t.LeastSurge())
+// u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable())
+// maxSurge -= sm
+// maxUnavailable -= um
+// r := t.Status.Replicas
+// plan := &RolloutPlan{Replicas: &r}
+// plan.MaxSurge = &s
+// plan.MaxUnavailable = &u
+// plan.correctFencepost(t, p.IsSurge())
+// plans[t.ClusterName] = plan
+// }
+
+// for _, t := range targetsToScaleIn {
+// if t.SkipPlanForScaleIn(maxUnavailable) {
+// continue
+// }
+// // we tend to scale in those that are already unavailable
+// leastUnavailable := t.LeastUnavailable()
+// if t.DuringUpdating() {
+// // if it's during updating (for example, the maxUnavailable is enough for scale in and updating coming next,
+// // so we set the replicas and maxUnavailable; but a fed weight adjusting followed so we have to scale in again
+// // even though it's being updated), scaling will be performed proportionally and may not cover the
+// // unavailable instances as expected.
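+// // Zeroing leastUnavailable below means we stop counting this target's unavailable instances as budget that the
+// // scale-in is guaranteed to consume, precisely because a proportional scale-in may remove available instances
+// // instead.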
+// leastUnavailable = 0
+// }
+// scale, more := t.MaxScaleIn(maxUnavailable, leastUnavailable)
+// maxUnavailable -= more
+// plan := &RolloutPlan{OnlyPatchReplicas: true}
+// r := t.Status.Replicas - scale
+// plan.Replicas = &r
+// plans[t.ClusterName] = plan
+// }
+
+// for _, t := range targetsToUpdate {
+// if t.SkipPlanForUpdate(maxSurge, maxUnavailable) {
+// continue
+// }
+// s, sm := t.MaxSurge(maxSurge, t.LeastSurge())
+// u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable())
+// maxSurge -= sm
+// maxUnavailable -= um
+// plan := &RolloutPlan{}
+// plan.MaxSurge = &s
+// plan.MaxUnavailable = &u
+// plan.correctFencepost(t, p.IsSurge())
+// plans[t.ClusterName] = plan
+// }
+
+// for _, t := range targetsToScaleOut {
+// if t.SkipPlanForScaleOut(maxSurge) {
+// continue
+// }
+// // make sure new rs exists to avoid too much unnecessary work
+// if !t.Status.Updated && t.Status.Replicas != 0 {
+// continue
+// }
+// leastSurge := t.LeastSurge()
+// if t.DuringUpdating() {
+// leastSurge = 0
+// }
+// scale, more := t.MaxScaleOut(maxSurge, leastSurge)
+// maxSurge -= more
+// plan, ok := plans[t.ClusterName]
+// if !ok || plan == nil {
+// plan = &RolloutPlan{}
+// }
+// r := t.Status.Replicas + scale
+// plan.Replicas = &r
+// plans[t.ClusterName] = plan
+// }
+
+// for _, t := range targetsToScaleIn {
+// plan, ok := plans[t.ClusterName]
+// if !ok || plan == nil {
+// r := t.Status.Replicas
+// plan = &RolloutPlan{Replicas: &r}
+// }
+// // we have already scaled in some unavailable instances in the second step; exclude them
+// leastUnavailable := t.LeastUnavailable()
+// if !t.DuringUpdating() {
+// leastUnavailable -= t.Status.Replicas - *plan.Replicas
+// if leastUnavailable < 0 {
+// leastUnavailable = 0
+// }
+// }
+// if t.SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable) {
+// continue
+// }
+
+// plan.OnlyPatchReplicas = false
+// s, sm := t.MaxSurge(maxSurge, t.LeastSurge())
+// u, um := t.MaxUnavailable(maxUnavailable, leastUnavailable)
+// maxSurge -= sm
+// maxUnavailable -= um
+// plan.MaxSurge = &s
+// plan.MaxUnavailable = &u
+// plan.correctFencepost(t, p.IsSurge())
+// plans[t.ClusterName] = plan
+// }
+// if err := validatePlans(p, plans); err != nil {
+// klog.Errorf("Failed to generate rollout plan for %s: %v.
Current status: %s", p.Key, err, p) +// return RolloutPlans{} +// } +// return plans +// } + +// func sortTargets(targets []*TargetInfo) ([]*TargetInfo, []*TargetInfo, []*TargetInfo) { +// // sort the list to first update the targets that are already in update process +// sort.Slice(targets, func(i, j int) bool { +// return targets[i].ClusterName < targets[j].ClusterName +// }) +// var toUpdate, toScaleOut, toScaleIn []*TargetInfo +// for _, t := range targets { +// change := t.DesiredReplicas - t.Status.Replicas +// switch { +// case change < 0: +// toScaleIn = append(toScaleIn, t) +// case change > 0: +// toScaleOut = append(toScaleOut, t) +// default: +// toUpdate = append(toUpdate, t) +// } +// } +// return toUpdate, toScaleOut, toScaleIn +// } + +// func Int32Min(a, b int32) int32 { +// if b < a { +// return b +// } +// return a +// } + +// func Int32Max(a, b int32) int32 { +// if b > a { +// return b +// } +// return a +// } + +// func resolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { +// surge, err := intstrutil.GetValueFromIntOrPercent( +// intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), +// int(desired), +// true, +// ) +// if err != nil { +// return 0, 0, err +// } +// unavailable, err := intstrutil.GetValueFromIntOrPercent( +// intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), +// int(desired), +// false, +// ) +// if err != nil { +// return 0, 0, err +// } + +// if surge == 0 && unavailable == 0 { +// // Validation should never allow the user to explicitly use zero values for both maxSurge +// // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. +// // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the +// // theory that surge might not work due to quota. 
+// unavailable = 1
+// }
+
+// return int32(surge), int32(unavailable), nil
+// }
+
+// func RetrieveFencepost(unstructuredObj *unstructured.Unstructured, maxSurgePath []string, maxUnavailablePath []string,
+// replicas int32,
+// ) (int32, int32, error) {
+// var maxSurge, maxUnavailable *intstrutil.IntOrString
+// if ms, ok, err := unstructured.NestedString(unstructuredObj.Object, maxSurgePath...); ok && err == nil {
+// maxSurge = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: ms}
+// } else {
+// if ms, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxSurgePath...); ok && err2 == nil {
+// maxSurge = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(ms)}
+// } else {
+// klog.V(4).Infof("Failed to retrieve maxSurge from %s/%s: %v, %v",
+// unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2)
+// }
+// }
+// if mu, ok, err := unstructured.NestedString(unstructuredObj.Object, maxUnavailablePath...); ok && err == nil {
+// maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: mu}
+// } else {
+// if mu, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxUnavailablePath...); ok && err2 == nil {
+// maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(mu)}
+// } else {
+// klog.V(4).Infof("Failed to retrieve maxUnavailable from %s/%s: %v, %v",
+// unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2)
+// }
+// }
+
+// ms, mu, err := resolveFenceposts(maxSurge, maxUnavailable, replicas)
+// if err != nil {
+// return 0, 0, err
+// }
+// if ms < 0 {
+// ms = 0
+// }
+// if mu < 0 {
+// mu = 0
+// }
+// return ms, mu, nil
+// }
+
+// func retrieveNewReplicaSetInfo(unstructuredObj *unstructured.Unstructured) (int32, int32, error) {
+// ann, ok := unstructuredObj.GetAnnotations()[LatestReplicasetReplicasAnnotation]
+// if !ok || ann == "" {
+// return 0, 0, errors.Errorf("missing annotation %s", LatestReplicasetReplicasAnnotation)
+// }
+// replicas, err := strconv.ParseInt(ann, 10, 32)
+// if err != nil {
+// return 0, 0, err
+// }
+// ann, ok = unstructuredObj.GetAnnotations()[LatestReplicasetAvailableReplicasAnnotation]
+// if !ok || ann == "" {
+// return 0, 0, errors.Errorf(
+// "missing annotation %s", LatestReplicasetAvailableReplicasAnnotation)
+// }
+// availableReplicas, err := strconv.ParseInt(ann, 10, 32)
+// if err != nil {
+// return 0, 0, err
+// }
+// // todo: make sure the latestreplicaset annotations describe the current pod template of the deployment
+// // a simple way to tell if the latestreplicaset annotations are up to date with the current deployment.
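+// // In other words, the check below treats the annotations as stale when the recorded last ReplicaSet name equals
+// // the latest ReplicaSet name (which can also happen when the deployment is paused), and then resets the
+// // new-ReplicaSet counts to zero instead of trusting them.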
+// lastRsName, lastRsNameExists := unstructuredObj.GetAnnotations()[common.LastReplicasetName] +// rsName, rsNameExists := unstructuredObj.GetAnnotations()[LatestReplicasetNameAnnotation] +// if !rsNameExists { +// return 0, 0, errors.Errorf("missing annotation %s", LatestReplicasetNameAnnotation) +// } +// rsNameOutdated := lastRsNameExists && rsNameExists && lastRsName == rsName +// if rsNameOutdated { +// // paused=true may also result in this situation +// replicas, availableReplicas = 0, 0 +// } +// return int32(replicas), int32(availableReplicas), nil +// } + +// func validatePlans(p *RolloutPlanner, plans RolloutPlans) error { +// var planned, desired, current, maxUnavailable int32 +// for _, t := range p.Targets { +// desired += t.DesiredReplicas +// cluster := t.ClusterName +// r := t.Status.Replicas +// current += r +// if p, ok := plans[cluster]; ok { +// if p == nil { +// return errors.Errorf("invalid plan for %s: %v", cluster, p) +// } +// if p.Replicas != nil { +// r = *p.Replicas +// } else { +// r = t.DesiredReplicas +// } +// if p.MaxUnavailable != nil { +// if p.MaxSurge == nil || *p.MaxSurge != 0 || *p.MaxUnavailable != 1 { +// maxUnavailable += *p.MaxUnavailable +// } +// } +// } +// planned += r +// } +// if p.Replicas-desired > p.MaxUnavailable { +// return errors.Errorf("desired replicas deviates too much from the initial replicas, maybe some " + +// "clusters are not ready") +// } +// l, h := desired, current +// if desired > current { +// l, h = current, desired +// } +// if l-planned > p.MaxUnavailable || planned-h > p.MaxSurge { +// return errors.Errorf("invalid plan: %v", plans) +// } +// return nil +// } diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index aefe7e00..6801d487 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -121,14 +121,49 @@ func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.Federated m.lock.Lock() defer m.lock.Unlock() - panic("unimplemented") + clusterName := cluster.Name + + clusterClient, ok := m.clients[clusterName] + if !ok { + var err error + if clusterClient, err = m.clientGetter(cluster); err != nil { + return fmt.Errorf("failed to get client for cluster %s: %s", clusterName, err) + } + m.clients[clusterName] = clusterClient + } + + manager, ok := m.informerManagers[clusterName] + if !ok { + manager = NewInformerManager(clusterClient, m.ftcInformer) + ctx, cancel := context.WithCancel(context.Background()) + + for _, generator := range m.eventHandlerGenerators { + if err := manager.AddEventHandlerGenerator(generator); err != nil { + return fmt.Errorf("failed to initialized InformerManager for cluster %s: %w", clusterName, err) + } + } + + manager.Start(ctx) + m.informerManagers[clusterName] = manager + m.informerManagersCancelFuncs[clusterName] = cancel + } + + return nil } func (m *federatedInformerManager) processClusterUnjoin(clusterName string) error { m.lock.Lock() defer m.lock.Unlock() - panic("unimplemented") + delete(m.clients, clusterName) + + if cancel, ok := m.informerManagersCancelFuncs[clusterName]; ok { + cancel() + delete(m.informerManagers, clusterName) + delete(m.informerManagersCancelFuncs, clusterName) + } + + return nil } func (m *federatedInformerManager) AddClusterEventHandler(handler *ClusterEventHandler) error { diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go 
index e69de29b..c54c2c6e 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -0,0 +1,19 @@ +package informermanager + +import "testing" + +func TestClientAvailableForExistingClusters(t *testing.T) { + +} + +func TestClientAvailableForNewCluster(t *testing.T) { + +} + +func TestListerAvailableForExistingFTCsAndClusters(t *testing.T) { + +} + +func TestListerAvailableForNewFTCAndCluster(t *testing.T) { + +} diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index e07d9e36..889a491b 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -20,6 +20,7 @@ import ( fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" + "github.com/kubewharf/kubeadmiral/pkg/util/tools" ) type informerManager struct { @@ -32,7 +33,7 @@ type informerManager struct { eventHandlerGenerators []*EventHandlerGenerator - gvrMapping map[schema.GroupVersionResource]string + gvrMapping *tools.BijectionMap[string, schema.GroupVersionResource] informers map[string]informers.GenericInformer informerStopChs map[string]chan struct{} @@ -49,7 +50,7 @@ func NewInformerManager(client dynamic.Interface, ftcInformer fedcorev1a1informe client: client, ftcInformer: ftcInformer, eventHandlerGenerators: []*EventHandlerGenerator{}, - gvrMapping: map[schema.GroupVersionResource]string{}, + gvrMapping: tools.NewBijectionMap[string, schema.GroupVersionResource](), informers: map[string]informers.GenericInformer{}, informerStopChs: map[string]chan struct{}{}, eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, @@ -103,13 +104,20 @@ func (m *informerManager) worker() { return } - if err := m.processFTC(ftc); err != nil { - m.logger.Error(err, "Failed to process FederatedTypeConfig, will retry") + err, needReenqueue := m.processFTC(ftc) + if err != nil { + if needReenqueue { + m.logger.Error(err, "Failed to process FederatedTypeConfig, will retry") + } else { + m.logger.Error(err, "Failed to process FederatedTypeConfig") + } + } + if needReenqueue { m.queue.Add(key) } } -func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) error { +func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err error, needReenqueue bool) { m.lock.Lock() defer m.lock.Unlock() @@ -117,10 +125,23 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) error apiResource := ftc.GetSourceType() gvr := schemautil.APIResourceToGVR(&apiResource) - m.gvrMapping[gvr] = ftcName + var informer informers.GenericInformer + + if oldGVR, exists := m.gvrMapping.Lookup(ftcName); exists { + if oldGVR != gvr { + // this might occur if a ftc was deleted and recreated with a different source type within a short period of + // time and we skipped processing the deletion. we simply process the ftc deletion and reenqueue. + err := m.processFTCDeletionUnlocked(ftcName) + return err, true + } + + informer = m.informers[ftcName] + } else { + if err := m.gvrMapping.Add(ftcName, gvr); err != nil { + // there must be another ftc with the same source type GVR. 
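+			// Hypothetical illustration: if FTC "deployments.apps" is already mapped to the deployments GVR, a second
+			// FTC added with the same source type fails here and is dropped without retry (needReenqueue is false
+			// below), since retrying cannot resolve the conflict.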
+ return fmt.Errorf("source type is already referenced by another FederatedTypeConfig: %w", err), false + } - informer, ok := m.informers[ftcName] - if !ok { informer = dynamicinformer.NewFilteredDynamicInformer( m.client, gvr, @@ -146,7 +167,7 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) error switch { case !shouldRegister && oldRegistrationExists: if err := informer.Informer().RemoveEventHandler(oldRegistration); err != nil { - return fmt.Errorf("failed to unregister event handler: %w", err) + return fmt.Errorf("failed to unregister event handler: %w", err), true } delete(registrations, generator) @@ -154,35 +175,33 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) error handler := generator.Generator(ftc) newRegistration, err := informer.Informer().AddEventHandler(handler) if err != nil { - return fmt.Errorf("failed to register event handler: %w", err) + return fmt.Errorf("failed to register event handler: %w", err), true } registrations[generator] = newRegistration } } - return nil + return nil, false } func (m *informerManager) processFTCDeletion(ftcName string) error { m.lock.Lock() defer m.lock.Unlock() - stopCh, ok := m.informerStopChs[ftcName] - if !ok { - return nil + return m.processFTCDeletionUnlocked(ftcName) +} + +func (m *informerManager) processFTCDeletionUnlocked(ftcName string) error { + if stopCh, ok := m.informerStopChs[ftcName]; ok { + close(stopCh) } - close(stopCh) + m.gvrMapping.Delete(ftcName) + delete(m.informers, ftcName) delete(m.informerStopChs, ftcName) delete(m.eventHandlerRegistrations, ftcName) - for gvr, ftc := range m.gvrMapping { - if ftc == ftcName { - delete(m.gvrMapping, gvr) - } - } - return nil } @@ -208,7 +227,7 @@ func (m *informerManager) GetResourceLister( m.lock.RLock() defer m.lock.RUnlock() - ftc, ok := m.gvrMapping[gvr] + ftc, ok := m.gvrMapping.ReverseLookup(gvr) if !ok { return nil, nil, false } diff --git a/pkg/util/tools/bijection.go b/pkg/util/tools/bijection.go new file mode 100644 index 00000000..f4ba099e --- /dev/null +++ b/pkg/util/tools/bijection.go @@ -0,0 +1,93 @@ +package tools + +import ( + "fmt" + "sync" +) + +func NewBijectionMap[T1, T2 comparable]() *BijectionMap[T1, T2] { + return &BijectionMap[T1, T2]{ + lock: sync.RWMutex{}, + forwardMap: map[T1]T2{}, + reverseMap: map[T2]T1{}, + } +} + +type BijectionMap[T1, T2 comparable] struct { + lock sync.RWMutex + + forwardMap map[T1]T2 + reverseMap map[T2]T1 +} + +func (m *BijectionMap[T1, T2]) Lookup(key T1) (value T2, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + val, exists := m.forwardMap[key] + if !exists { + return *new(T2), false + } + + return val, true +} + +func (m *BijectionMap[T1, T2]) ReverseLookup(key T2) (value T1, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + val, exists := m.reverseMap[key] + if !exists { + return *new(T1), false + } + + return val, true +} + +func (m *BijectionMap[T1, T2]) Add(key1 T1, key2 T2) error { + m.lock.Lock() + defer m.lock.Unlock() + + if val, ok := m.forwardMap[key1]; ok { + return fmt.Errorf("%v is already mapped to %v", key1, val) + } + + if val, ok := m.reverseMap[key2]; ok { + return fmt.Errorf("%v is already mapped to %v", key2, val) + } + + m.forwardMap[key1] = key2 + m.reverseMap[key2] = key1 + + return nil +} + +func (m *BijectionMap[T1, T2]) Delete(key T1) bool { + m.lock.Lock() + defer m.lock.Unlock() + + val, ok := m.forwardMap[key] + if !ok { + return false + } + + delete(m.forwardMap, key) + delete(m.reverseMap, val) + + return true 
+} + +func (m *BijectionMap[T1, T2]) ReverseDelete(key T2) bool { + m.lock.Lock() + defer m.lock.Unlock() + + val, ok := m.reverseMap[key] + if !ok { + return false + } + + delete(m.reverseMap, key) + delete(m.forwardMap, val) + + return true +} From fa0020db6c2e8e7bd2756e84704f2f82ad8b70af Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 7 Jul 2023 15:36:53 +0800 Subject: [PATCH 012/173] finish implementation of federatedinformermanager --- .../federatedinformermanager.go | 66 +- .../federatedinformermanager_test.go | 52 +- pkg/util/informermanager/informermanager.go | 7 +- .../informermanager/informermanager_test.go | 597 +++++++++--------- pkg/util/informermanager/interface.go | 21 +- pkg/util/informermanager/testutils.go | 216 +++++++ 6 files changed, 612 insertions(+), 347 deletions(-) create mode 100644 pkg/util/informermanager/testutils.go diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 6801d487..60d7c66b 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -32,6 +32,7 @@ type federatedInformerManager struct { clusterEventHandler []*ClusterEventHandler clients map[string]dynamic.Interface + connectionMap map[string]string informerManagers map[string]InformerManager informerManagersCancelFuncs map[string]context.CancelFunc @@ -53,6 +54,7 @@ func NewFederatedInformerManager( eventHandlerGenerators: []*EventHandlerGenerator{}, clusterEventHandler: []*ClusterEventHandler{}, clients: map[string]dynamic.Interface{}, + connectionMap: map[string]string{}, informerManagers: map[string]InformerManager{}, informerManagersCancelFuncs: map[string]context.CancelFunc{}, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), @@ -103,7 +105,7 @@ func (m *federatedInformerManager) worker() { return } if apierrors.IsNotFound(err) || !util.IsClusterJoined(&cluster.Status) { - if err := m.processClusterUnjoin(name); err != nil { + if err := m.processClusterDeletion(name); err != nil { m.logger.Error(err, "Failed to process FederatedCluster, will retry") m.queue.Add(key) return @@ -111,57 +113,77 @@ func (m *federatedInformerManager) worker() { return } - if err := m.processCluster(cluster); err != nil { - m.logger.Error(err, "Failed to process FederatedCluster, will retry") + err, needReenqueue := m.processCluster(cluster) + if err != nil { + if needReenqueue { + m.logger.Error(err, "Failed to process FederatedCluster, will retry") + } else { + m.logger.Error(err, "Failed to process FederatedCluster") + } + } + if needReenqueue { m.queue.Add(key) } } -func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.FederatedCluster) error { +func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.FederatedCluster) (err error, needReenqueue bool) { m.lock.Lock() defer m.lock.Unlock() clusterName := cluster.Name - clusterClient, ok := m.clients[clusterName] - if !ok { - var err error - if clusterClient, err = m.clientGetter(cluster); err != nil { - return fmt.Errorf("failed to get client for cluster %s: %s", clusterName, err) + connectionHash := m.clientGetter.ConnectionHash(cluster) + if oldConnectionHash, exists := m.connectionMap[clusterName]; exists { + if oldConnectionHash != connectionHash { + // This might occur if a cluster was deleted and recreated with different connection details within a short + // period of time and we missed processing the deletion. 
We simply process the cluster deletion and
+			// reenqueue.
+			// Note: updating of cluster connection details, however, is still not a supported use case.
+			err := m.processClusterDeletionUnlocked(clusterName)
+			return err, true
+		}
+	} else {
+		clusterClient, err := m.clientGetter.ClientGetter(cluster)
+		if err != nil {
+			return fmt.Errorf("failed to get client for cluster %s: %s", clusterName, err), true
 		}
-		m.clients[clusterName] = clusterClient
-	}
 
-	manager, ok := m.informerManagers[clusterName]
-	if !ok {
-		manager = NewInformerManager(clusterClient, m.ftcInformer)
+		manager := NewInformerManager(clusterClient, m.ftcInformer)
 		ctx, cancel := context.WithCancel(context.Background())
 
 		for _, generator := range m.eventHandlerGenerators {
 			if err := manager.AddEventHandlerGenerator(generator); err != nil {
-				return fmt.Errorf("failed to initialized InformerManager for cluster %s: %w", clusterName, err)
+				cancel()
+				return fmt.Errorf("failed to initialize InformerManager for cluster %s: %w", clusterName, err), true
 			}
 		}
 
 		manager.Start(ctx)
+
+		m.connectionMap[clusterName] = connectionHash
+		m.clients[clusterName] = clusterClient
 		m.informerManagers[clusterName] = manager
 		m.informerManagersCancelFuncs[clusterName] = cancel
 	}
 
-	return nil
+	return nil, false
 }
 
-func (m *federatedInformerManager) processClusterUnjoin(clusterName string) error {
+func (m *federatedInformerManager) processClusterDeletion(clusterName string) error {
 	m.lock.Lock()
 	defer m.lock.Unlock()
+	return m.processClusterDeletionUnlocked(clusterName)
+}
 
+func (m *federatedInformerManager) processClusterDeletionUnlocked(clusterName string) error {
+	delete(m.connectionMap, clusterName)
 	delete(m.clients, clusterName)
 
 	if cancel, ok := m.informerManagersCancelFuncs[clusterName]; ok {
 		cancel()
-		delete(m.informerManagers, clusterName)
-		delete(m.informerManagersCancelFuncs, clusterName)
 	}
+	delete(m.informerManagers, clusterName)
+	delete(m.informerManagersCancelFuncs, clusterName)
 
 	return nil
 }
@@ -281,3 +303,7 @@ func (m *federatedInformerManager) Start(ctx context.Context) {
 }
 
 var _ FederatedInformerManager = &federatedInformerManager{}
+
+func getConnectionHash(cluster *fedcorev1a1.FederatedCluster) string {
+	panic("unimplemented")
+}
diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go
index c54c2c6e..8633bcd9 100644
--- a/pkg/util/informermanager/federatedinformermanager_test.go
+++ b/pkg/util/informermanager/federatedinformermanager_test.go
@@ -2,18 +2,62 @@ package informermanager
 
 import "testing"
 
-func TestClientAvailableForExistingClusters(t *testing.T) {
+func TestFederatedInformerManagerClientAvailableForExistingClusters(t *testing.T) {
 
 }
 
-func TestClientAvailableForNewCluster(t *testing.T) {
+func TestFederatedInformerManagerClientAvailableForNewCluster(t *testing.T) {
 
 }
 
-func TestListerAvailableForExistingFTCsAndClusters(t *testing.T) {
+func TestFederatedInformerManagerListerAvailableForExistingFTCsAndClusters(t *testing.T) {
 
 }
 
-func TestListerAvailableForNewFTCAndCluster(t *testing.T) {
+func TestFederatedInformerManagerListerAvailableForNewFTC(t *testing.T) {
+
+}
+
+func TestFederatedInformerManagerListerAvailableForNewCluster(t *testing.T) {
+
+}
+
+func TestFederatedInformerManagerEventHandlerRegistrationForExistingFTCsAndClusters(t *testing.T) {
+
+}
+
+func TestFederatedInformerManagerEventHandlerRegistrationForNewFTC(t *testing.T) {
+
+}
+
+func TestFederatedInformerManagerEventHandlerRegistrationOnFTCUpdate(t
*testing.T) { + +} + +func TestFederatedInformerManagerEventHandlerRegistrationOnFTCDelete(t *testing.T) { + +} + +func TestFederatedInformerManagerEventHandlerRegistrationForNewCluster(t *testing.T) { + +} + +func TestFederatedInformerManagerEventHandlerRegistrationOnClusterDelete(t *testing.T) { + +} + +func TestFederatedInformerManagerClusterEventHandlerForExistingClusters(t *testing.T) { + +} + +func TestFederatedInformerManagerClusterEventHandlerForNewCluster(t *testing.T) { + +} + +func TestFederatedInformerManagerClusterEventHandlerOnClusterUpdate(t *testing.T) { + +} + +func TestFederatedInformerManagerClusterEventHandlerOnClusterDelete(t *testing.T) { } diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 889a491b..55985e60 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -129,8 +129,9 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err if oldGVR, exists := m.gvrMapping.Lookup(ftcName); exists { if oldGVR != gvr { - // this might occur if a ftc was deleted and recreated with a different source type within a short period of - // time and we skipped processing the deletion. we simply process the ftc deletion and reenqueue. + // This might occur if a ftc was deleted and recreated with a different source type within a short period of + // time and we missed processing the deletion. We simply process the ftc deletion and reenqueue. Note: + // updating of ftc source types, however, is still not a supported use case. err := m.processFTCDeletionUnlocked(ftcName) return err, true } @@ -138,7 +139,7 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err informer = m.informers[ftcName] } else { if err := m.gvrMapping.Add(ftcName, gvr); err != nil { - // there must be another ftc with the same source type GVR. + // There must be another ftc with the same source type GVR. return fmt.Errorf("source type is already referenced by another FederatedTypeConfig: %w", err), false } diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 7e0e34f5..46ee615b 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -2,14 +2,12 @@ package informermanager import ( "context" - "sync" "testing" "time" "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -27,12 +25,18 @@ import ( schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) -func TestListerAvailableForExistingFTCs(t *testing.T) { +// Verifies that the listers for the SourceType GVR of existing FTCs in the cluster are eventually available after the +// InformerManager is started. +func TestInformerManagerListerAvailableForExistingFTCs(t *testing.T) { g := gomega.NewGomegaWithT(t) + // 1. Bootstrap an environment with FTCs for deployments, configmaps and secrets. + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} manager, _, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, []*unstructured.Unstructured{}) + // 2. 
Start the manager
+
 	ctx := context.Background()
 	manager.Start(ctx)
 
@@ -42,6 +46,8 @@ func TestListerAvailableForExistingFTCs(t *testing.T) {
 		g.Fail("Timed out waiting for InformerManager cache sync")
 	}
 
+	// 3. Verify that the listers for each FTC's SourceType GVR are eventually available.
+
 	for _, ftc := range defaultFTCs {
 		apiresource := ftc.GetSourceType()
 		gvr := schemautil.APIResourceToGVR(&apiresource)
@@ -54,7 +60,8 @@ func TestListerAvailableForExistingFTCs(t *testing.T) {
 		}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 	}
 
-	// sanity check: gvr without corresponding ftc should not exist
+	// 4. Sanity check: the lister for a GVR without a corresponding FTC should not exist
+
 	gvr := schema.GroupVersionResource{
 		Group:    "apps",
 		Version:  "v1",
@@ -66,11 +73,17 @@ func TestListerAvailableForExistingFTCs(t *testing.T) {
 	g.Expect(informerSynced).To(gomega.BeNil())
 }
 
-func TestListerAvailableForNewFTC(t *testing.T) {
+// Verifies that the listers for the SourceType of FTCs created after the InformerManager is started eventually become
+// available.
+func TestInformerManagerListerAvailableForNewFTC(t *testing.T) {
 	g := gomega.NewGomegaWithT(t)
 
+	// 1. Bootstrap an environment with no FTCs to begin with.
+
 	manager, _, fedClient := boostrapInformerManagerWithFakeClients([]*fedcorev1a1.FederatedTypeConfig{}, []*unstructured.Unstructured{})
 
+	// 2. Start the InformerManager.
+
 	ctx := context.Background()
 	manager.Start(ctx)
 
@@ -80,12 +93,26 @@ func TestListerAvailableForNewFTC(t *testing.T) {
 		g.Fail("Timed out waiting for InformerManager cache sync")
 	}
 
+	// 3. Initialize the daemonset FTC that will be created later.
+
 	ftc := daemonsetFTC
+	apiresource := ftc.GetSourceType()
+	gvr := schemautil.APIResourceToGVR(&apiresource)
+
+	// 4. Sanity check: verify that the lister for daemonsets is initially not available
+
+	lister, informerSynced, exists := manager.GetResourceLister(gvr)
+	g.Expect(exists).To(gomega.BeFalse())
+	g.Expect(lister).To(gomega.BeNil())
+	g.Expect(informerSynced).To(gomega.BeNil())
+
+	// 5. Create the daemonset FTC.
+
 	_, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
-	apiresource := ftc.GetSourceType()
-	gvr := schemautil.APIResourceToGVR(&apiresource)
+	// 6. Verify that the lister for the SourceType of the newly created daemonset FTC is eventually available.
 
 	g.Eventually(func(g gomega.Gomega) {
 		lister, informerSynced, exists := manager.GetResourceLister(gvr)
@@ -94,21 +121,22 @@ func TestListerAvailableForNewFTC(t *testing.T) {
 		g.Expect(informerSynced()).To(gomega.BeTrue())
 	}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 
-	// sanity check: gvr without corresponding ftc should not exist
-	gvr = schema.GroupVersionResource{
-		Group:    "apps",
-		Version:  "v1",
-		Resource: "statefulsets",
-	}
-	lister, informerSynced, exists := manager.GetResourceLister(gvr)
+	// 7. Sanity check: the lister for a GVR without a corresponding FTC should not exist
+
+	lister, informerSynced, exists = manager.GetResourceLister(common.DeploymentGVR)
 	g.Expect(exists).To(gomega.BeFalse())
 	g.Expect(lister).To(gomega.BeNil())
 	g.Expect(informerSynced).To(gomega.BeNil())
 }
 
-func TestEventHandlerRegistrationForExistingFTCs(t *testing.T) {
+// Verifies that event handlers from EventHandlerGenerators are properly registered for existing FTCs after the
+// InformerManager is started.
+func TestInformerManagerEventHandlerRegistrationForExistingFTCs(t *testing.T) {
 	g := gomega.NewGomegaWithT(t)
 
+	// 1.
Bootstrap an environment with FTCs for deployments, configmaps and secrets. Also create an existing
+	// deployment, configmap and secret.
+
 	dp1 := getDeployment("dp-1", "default")
 	cm1 := getConfigMap("cm-1", "default")
 	sc1 := getSecret("sc-1", "default")
@@ -117,6 +145,10 @@ func TestEventHandlerRegistrationForExistingFTCs(t *testing.T) {
 	defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1}
 	manager, dynamicClient, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects)
 
+	// 2. Add EventHandlerGenerators to the InformerManager. registeredResourceEventHandler SHOULD be registered to ALL
+	// FTCs (based on its Predicate), unregisteredResourceEventHandler SHOULD NOT be registered for ANY FTCs (based on
+	// its Predicate).
+
 	registeredResourceEventHandler := &countingResourceEventHandler{}
 	unregisteredResourceEventHandler := &countingResourceEventHandler{}
 
@@ -129,6 +161,8 @@ func TestEventHandlerRegistrationForExistingFTCs(t *testing.T) {
 		Generator: unregisteredResourceEventHandler.generateEventHandler,
 	})
 
+	// 3. Start the InformerManager.
+
 	ctx := context.Background()
 	manager.Start(ctx)
 
@@ -138,34 +172,39 @@ func TestEventHandlerRegistrationForExistingFTCs(t *testing.T) {
 		g.Fail("Timed out waiting for InformerManager cache sync")
 	}
 
-	// 1. If Predicate returns true, EventHandler should be generated and registered.
-
-	// check event handlers generated and initial events are received
+	// 4. Verify that the registeredResourceEventHandler is eventually registered for ALL FTCs and that the add events
+	// for the existing objects are ALL RECEIVED.
 
 	g.Eventually(func(g gomega.Gomega) {
+		// The generate function should be called once for each FTC.
 		g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs)))
+		// The number of add events should be equal to the number of currently existing objects.
 		g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
 		g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero())
 		g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
 	}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 
-	// check if additional events can be received
+	// 5. Verify that additional events continue to be received by registeredResourceEventHandler.
+
+	// 5a. Generate +1 add event for secrets.
 
-	// +1 add
 	sc2 := getSecret("sc-2", "default")
 	sc2, err := dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
-	// +1 update
+	// 5b. Generate +1 update event for deployments.
+
 	dp1.SetAnnotations(map[string]string{"test-annotation": "test-value"})
 	dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
-	// +1 delete
+	// 5c. Generate +1 delete event for configmaps.
+
 	err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
-	// santiy check: events for gvr without corresponding FTC should not be received
+	// 5d. Sanity check: events for GVR without a corresponding FTC should not be received.
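+	// (No daemonset FTC exists in this environment, so the daemonset created below must not reach any handler.)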
+ dm1 := getDaemonSet("dm-1", "default") _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Create(ctx, dm1, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -176,7 +215,7 @@ func TestEventHandlerRegistrationForExistingFTCs(t *testing.T) { g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) }) - // 2. If Predicate returns false, EventHandler should not be generated and registered. + // 6. Verify that unregisteredResourceEventHandler is not generated and receives 0 events. g.Consistently(func(g gomega.Gomega) { g.Expect(unregisteredResourceEventHandler.getGenerateCount()).To(gomega.BeZero()) @@ -186,9 +225,13 @@ func TestEventHandlerRegistrationForExistingFTCs(t *testing.T) { }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) } -func TestEventHandlerRegistrationForNewFTC(t *testing.T) { +// Verifies that event handlers from EventHandlerGenerators are properly registered for new FTCs created after the +// InformerManager is started. +func TestInformerManagerEventHandlerRegistrationForNewFTC(t *testing.T) { g := gomega.NewGomegaWithT(t) + // 1. Bootstrap an environment with no FTCs to begin with, but with 4 existing daemonsets. + dm1 := getDaemonSet("dm-1", "default") dm2 := getDaemonSet("dm-2", "default") dm3 := getDaemonSet("dm-3", "default") @@ -197,6 +240,10 @@ func TestEventHandlerRegistrationForNewFTC(t *testing.T) { defaultObjects := []*unstructured.Unstructured{dm1, dm2, dm3, dm4} manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients([]*fedcorev1a1.FederatedTypeConfig{}, defaultObjects) + // 2. Add EventHandlerGenerators to the InformerManager. registeredResourceEventHandler SHOULD be registered to ALL + // FTCs (based on its Predicate), unregisteredResourceEventHandler SHOULD NOT be registered for ANY FTCs (based on + // its Predicate). + registeredResourceEventHandler := &countingResourceEventHandler{} unregisteredResourceEventHandler := &countingResourceEventHandler{} @@ -209,6 +256,8 @@ func TestEventHandlerRegistrationForNewFTC(t *testing.T) { Generator: unregisteredResourceEventHandler.generateEventHandler, }) + // 3. Start InformerManager. + ctx := context.Background() manager.Start(ctx) @@ -218,13 +267,14 @@ func TestEventHandlerRegistrationForNewFTC(t *testing.T) { g.Fail("Timed out waiting for InformerManager cache sync") } + // 4. Create a new FTC for daemonsets. + ftc := daemonsetFTC _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 1. If Predicate returns true, a new EventHandler should be generated and registered. - - // check event handlers generated and initial events are received + // 5. Verify that the registeredResourceEventHandler is eventually registered for the new daemonset FTC and that the + // add events for the existing objects are ALL RECEIVED. g.Eventually(func(g gomega.Gomega) { g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) @@ -233,9 +283,10 @@ func TestEventHandlerRegistrationForNewFTC(t *testing.T) { g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - // check if additional events can be received + // 6. Verify that additional events continue to be received by registeredResourceEventHandler. + + // 6a. Generate +2 update events for daemonsets. 
-	// +2 update
 	dm1.SetAnnotations(map[string]string{"test-annotation": "test-value"})
 	dm1, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
@@ -243,11 +294,13 @@ func TestEventHandlerRegistrationForNewFTC(t *testing.T) {
 	dm2, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm2, metav1.UpdateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
-	// +1 delete
+	// 6b. Generate +1 delete event for daemonsets.
+
 	err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
-	// santiy check: events for gvr without corresponding FTC should not be received
+	// 6c. Sanity check: events for GVRs without a corresponding FTC should not be received.
+
 	sc1 := getSecret("sc-1", "default")
 	_, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc1, metav1.CreateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
@@ -258,7 +311,7 @@ func TestEventHandlerRegistrationForNewFTC(t *testing.T) {
 		g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1))
 	})
 
-	// 2. If Predicate returns false, no new EventHandlers should be generated and registered.
+	// 7. Verify that unregisteredResourceEventHandler is not generated and receives 0 events.
 
 	g.Consistently(func(g gomega.Gomega) {
 		g.Expect(unregisteredResourceEventHandler.getGenerateCount()).To(gomega.BeZero())
@@ -268,25 +321,38 @@ func TestEventHandlerRegistrationForNewFTC(t *testing.T) {
 	}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 }
 
-func TestEventHandlerRegistrationOnFTCUpdate(t *testing.T) {
+// Verifies that event handlers from EventHandlerGenerator are unregistered and registered according to its Predicate
+// when an FTC is updated.
+func TestInformerManagerEventHandlerRegistrationOnFTCUpdate(t *testing.T) {
 	g := gomega.NewGomegaWithT(t)
 
+	// 1. Bootstrap an environment with a single Deployment FTC.
+
 	ftc := deploymentFTC.DeepCopy()
 	ftc.SetAnnotations(map[string]string{"predicate": "false"})
 
 	defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc}
 	manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, []*unstructured.Unstructured{})
 
-	registeredResourceEventHandler := &countingResourceEventHandler{}
+	// 2. Add EventHandlerGenerators to the InformerManager. eventHandler should be registered for FTCs based on the
+	// "predicate" annotation found on the FTC object. If this annotation exists and its value == "true", the
+	// eventHandler SHOULD be registered.
+	//
+	// Because the deployment FTC was created with "predicate" annotation value as "false", eventHandler SHOULD NOT be
+	// registered at the start.
+
+	eventHandler := &countingResourceEventHandler{}
 
 	manager.AddEventHandlerGenerator(&EventHandlerGenerator{
 		Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool {
 			anno := ftc.GetAnnotations()
 			return anno != nil && anno["predicate"] == "true"
 		},
-		Generator: registeredResourceEventHandler.generateEventHandler,
+		Generator: eventHandler.generateEventHandler,
 	})
 
+	// 3. Start the InformerManager.
+
 	ctx := context.Background()
 	manager.Start(ctx)
 
@@ -296,109 +362,142 @@ func TestEventHandlerRegistrationOnFTCUpdate(t *testing.T) {
 		g.Fail("Timed out waiting for InformerManager cache sync")
 	}
 
-	// sanity check: no event handler should have been generated and no events should have been received
+	// 4.
Verify that eventHandler IS NOT generated or registered. This is because the deployment FTC was created with + // "predicate" annotation value = "false". + g.Consistently(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler.getGenerateCount()).To(gomega.BeZero()) + g.Expect(eventHandler.getAddEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - // 1. If Predicate returns true and there is no existing EventHandler, a new EventHandler should be generated and - // registered. + // 5a. Update the FTC so that the "predicate" annotation == "true". ftc.SetAnnotations(map[string]string{"predicate": "true"}) ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // sleep for a second to allow the InformerManager to process the ftc update + + // 5b. Sleep for a second to allow the InformerManager time to process the FTC update. + <-time.After(time.Second) + // 5c. Verify that eventHandler is eventually registered and it receives any deployment events. + + // 5d. Generate +1 add event for deployments + dp1 := getDeployment("dp-1", "default") dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp1, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) g.Eventually(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - // 2. If Predicate returns true and there is an existing EventHandler, a new EventHandler should not be generated but - // the existing EventHandler should remain registered. + // 6a. Update the FTC such that the "predicate" annotation remains == "true" ftc.SetAnnotations(map[string]string{"predicate": "true", "update-trigger": "1"}) ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // sleep for a second to allow the InformerManager to process the ftc update + + // 6b. Sleep for a second to allow the InformerManager to process the FTC update + <-time.After(time.Second) + // 6c. 
Verify that the generate function is NOT called (a new event handler was not generated)
+
 	g.Consistently(func(g gomega.Gomega) {
-		g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
 	}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 
+	// 6d. Verify that eventHandler was NOT re-registered and additional events continue to be received.
+
+	// 6e. Generate +1 update event for deployments.
+
 	dp1.SetAnnotations(map[string]string{"test-annotation": "test-value"})
 	dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
 	g.Eventually(func(g gomega.Gomega) {
-		g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
+		g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
+		// Add events should stay at 1 since eventHandler was not re-registered (registering will cause it to receive
+		// synthetic add events for all objects in the cache).
+		g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero())
 	}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 
-	// 3. If Predicate returns false and there is an existing EventHandler, the existing EventHandler should be
-	// unregistered.
+	// 7a. Update the FTC such that "predicate" annotation becomes "false".
 
 	ftc.SetAnnotations(map[string]string{"predicate": "false"})
 	ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
-	// sleep for a second to allow the InformerManager to process the ftc update
+
+	// 7b. Sleep for a second to allow the InformerManager to process the FTC update.
+
 	<-time.After(time.Second)
 
-	// events should no longer be received for deployments
+	// 7c. Verify that events are no longer received by eventHandler since it should be unregistered.
+
+	// 7d. Generate +1 add event for deployments.
+
 	dp2 := getDeployment("dp-2", "default")
 	dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
+	// 7e. Generate +1 update event for deployments.
+
 	dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"})
 	dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
+	// 7f. Generate +1 delete event for deployments.
+
 	err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp1.GetName(), metav1.DeleteOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
 	g.Consistently(func(g gomega.Gomega) {
-		g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
+		g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero())
 	}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 
-	// 4. If Predicate returns false and there is no existing EventHandler, no new EventHandlers should be generated and
-	// registered.
+	// 8a. Update the FTC such that predicate remains == "false".
 
 	ftc.SetAnnotations(map[string]string{"predicate": "false", "update-trigger": "1"})
 	ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
-	// sleep for a second to allow the InformerManager to process the ftc update
+
+	// 8b. Sleep for a second to allow the InformerManager to process the FTC update.
+
 	<-time.After(time.Second)
 
+	// 8c. Verify that events are still not received by eventHandler since it should remain unregistered.
+
+	// 8d. Generate +1 delete event for deployments.
+
 	err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
 	g.Consistently(func(g gomega.Gomega) {
-		g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1))
-		g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
+		g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1))
+		g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero())
 	}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 }
 
-func TestEventHandlerRegistrationOnFTCDelete(t *testing.T) {
+// Verifies that event handlers from EventHandlerGenerators are unregistered when an FTC is deleted.
+func TestInformerManagerEventHandlerRegistrationOnFTCDelete(t *testing.T) {
 	g := gomega.NewGomegaWithT(t)
 
+	// 1. Bootstrap an environment with FTCs for deployments, configmaps and secrets. Also create an existing
+	// deployment, configmap and secret.
+
 	dp1 := getDeployment("dp-1", "default")
 	cm1 := getConfigMap("cm-1", "default")
 	sc1 := getSecret("sc-1", "default")
@@ -407,13 +506,23 @@ func TestEventHandlerRegistrationOnFTCDelete(t *testing.T) {
 	defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1}
 	manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects)
 
-	registeredResourceEventHandler := &countingResourceEventHandler{}
+	// 2.
Add EventHandlerGenerators to the InformerManager. eventHandler1 and eventHandler2 SHOULD be registered to ALL
+	// FTCs (based on their Predicates).
+
+	eventHandler1 := &countingResourceEventHandler{}
+	eventHandler2 := &countingResourceEventHandler{}
 
 	manager.AddEventHandlerGenerator(&EventHandlerGenerator{
 		Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true },
-		Generator: registeredResourceEventHandler.generateEventHandler,
+		Generator: eventHandler1.generateEventHandler,
+	})
+	manager.AddEventHandlerGenerator(&EventHandlerGenerator{
+		Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true },
+		Generator: eventHandler2.generateEventHandler,
 	})
 
+	// 3. Start the InformerManager.
+
 	ctx := context.Background()
 	manager.Start(ctx)
 
@@ -423,65 +532,95 @@ func TestEventHandlerRegistrationOnFTCDelete(t *testing.T) {
 		g.Fail("Timed out waiting for InformerManager cache sync")
 	}
 
-	// All existing EventHandlers for the FTC should be unregistered or stop receiving events.
-
-	// sanity check: event handlers generated and initial events are received
+	// 4. Sanity check: verify that both eventHandler1 and eventHandler2 are registered and have received events for
+	// the existing objects.
 
 	g.Eventually(func(g gomega.Gomega) {
-		g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs)))
-		g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
-		g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero())
-		g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero())
+		g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs)))
+		g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
+		g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero())
+		g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeZero())
+
+		g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs)))
+		g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)))
+		g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero())
+		g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeZero())
 	}).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 
-	// delete deployment ftc
+	// 5. Delete the deployment FTC.
+
 	err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Delete(ctx, deploymentFTC.Name, metav1.DeleteOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
-	// sleep for a second to allow the InformerManager to process the ftc deletion
+	// 6. Sleep for a second to allow the InformerManager to process the FTC deletion.
+
 	<-time.After(time.Second)
 
-	// events should no longer be received for deployments
+	// 7. Verify that events are no longer received by eventHandler1 and eventHandler2.
+
+	// 7a. Generate +1 add event for deployments.
+
 	dp2 := getDeployment("dp-2", "default")
 	dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
+	// 7b. Generate +1 update event for deployments.
+
 	dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"})
 	dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{})
 	g.Expect(err).ToNot(gomega.HaveOccurred())
 
+	// 7c. Generate +1 delete event for deployments.
+ err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp1.GetName(), metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) g.Consistently(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) + g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeZero()) + + g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) + g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - // sanity check: events should still be received for the other remaining ftcs' source types + // 8. Sanity check: verify that events continue to be received for the other remaining FTCs' source types. + + // 8a. Generate +1 add event for secrets. - // +1 add sc2 := getSecret("sc-2", "default") sc2, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // +1 delete + // 8b. Generate +1 delete event for configmaps. + err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) g.Eventually(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)+1)) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)+1)) + g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) + + g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)+1)) + g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) } -func TestEventHandlerRegistrationAfterInformerShutdown(t *testing.T) { +// Verifies that all event handlers from EventHandlerGenerators no longer receive any events after the InformerManager +// is shut down (or when the context passed to the Start method expires).
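Outside of tests, this same context-expiry behaviour is what a caller relies on for graceful shutdown. A hypothetical wiring sketch; newInformerManager stands in for however the manager is actually constructed in a real binary (e.g. via NewInformerManager with a real dynamic client and FTC informer):

```go
package main

import (
	"context"
	"os/signal"
	"syscall"
)

func main() {
	// Tie the manager's lifetime to SIGINT/SIGTERM: once this context expires,
	// the informers stop and registered handlers receive no further events.
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()

	manager := newInformerManager() // hypothetical constructor, not part of this patch
	manager.Start(ctx)

	<-ctx.Done()
}
```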
+func TestInformerManagerEventHandlerRegistrationOnShutdown(t *testing.T) { g := gomega.NewGomegaWithT(t) + // 1. Bootstrap an environment with FTCs for deplyoments, configmaps and secrets. Also create an existing + // deployment, configmap and secret. + dp1 := getDeployment("dp-1", "default") cm1 := getConfigMap("cm-1", "default") sc1 := getSecret("sc-1", "default") @@ -490,13 +629,23 @@ func TestEventHandlerRegistrationAfterInformerShutdown(t *testing.T) { defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1} manager, dynamicClient, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) - registeredResourceEventHandler := &countingResourceEventHandler{} + // 2. Add EventHandlerGenerators to the InformerManager. eventHandler1 and eventHandler2 SHOULD be registered to ALL + // FTCs (based on its Predicate). + + eventHandler1 := &countingResourceEventHandler{} + eventHandler2 := &countingResourceEventHandler{} manager.AddEventHandlerGenerator(&EventHandlerGenerator{ Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, - Generator: registeredResourceEventHandler.generateEventHandler, + Generator: eventHandler1.generateEventHandler, + }) + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Generator: eventHandler2.generateEventHandler, }) + // 3. Start the InformerManager. + ctx, managerCancel := context.WithCancel(context.Background()) manager.Start(ctx) @@ -506,40 +655,59 @@ func TestEventHandlerRegistrationAfterInformerShutdown(t *testing.T) { g.Fail("Timed out waiting for InformerManager cache sync") } - // All existing EventHandlers for the FTC should be unregistered or stop receiving events. - - // sanity check: event handlers generated and initial events are received + // 4. Santiy check: verify that both eventHandler1 and eventHandler2 is registered and received events for the + // existing objects. g.Eventually(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) + g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeZero()) + + g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) + g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - // stop manager + // 5. Stop the InformerManager + managerCancel() - // sleep for a second to allow the InformerManager to process the shutdown + + // 6. Sleep for a second to allow the InformerManager to process the shutdown + <-time.After(time.Second) - // events should no longer be received for any ftc's source type + // 7. Verify that events are not received for ANY FTCs by both eventHandler1 and eventHandler2. + + // 7a. 
Generate +1 add event for deployments. + dp2 := getDeployment("dp-2", "default") dp2, err := dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) + // 7b. Generate +1 add event for configmaps. + cm2 := getConfigMap("cm-2", "default") cm2, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Create(ctx, cm2, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - sc2 := getConfigMap("sc-2", "default") - sc2, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{}) + // 7b. Generate +1 add event for secrets. + + sc2 := getSecret("sc-2", "default") + sc2, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) g.Consistently(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) + g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeZero()) + + g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) + g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) + g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) } @@ -572,204 +740,3 @@ func boostrapInformerManagerWithFakeClients( return informerManager, dynamicClient, fedClient } - -type countingResourceEventHandler struct { - lock sync.RWMutex - - generateCount int - - addEventCount int - updateEventCount int - deleteEventCount int -} - -func (h *countingResourceEventHandler) getAddEventCount() int { - h.lock.RLock() - defer h.lock.RUnlock() - return h.addEventCount -} - -func (h *countingResourceEventHandler) getUpdateEventCount() int { - h.lock.RLock() - defer h.lock.RUnlock() - return h.updateEventCount -} - -func (h *countingResourceEventHandler) getDeleteEventCount() int { - h.lock.RLock() - defer h.lock.RUnlock() - return h.deleteEventCount -} - -func (h *countingResourceEventHandler) getGenerateCount() int { - h.lock.RLock() - defer h.lock.RUnlock() - return h.generateCount -} - -func (h *countingResourceEventHandler) generateEventHandler(_ *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { - h.lock.Lock() - defer h.lock.Unlock() - h.generateCount++ - return h -} - -func (h *countingResourceEventHandler) OnAdd(_ interface{}) { - h.lock.Lock() - defer h.lock.Unlock() - h.addEventCount++ -} - -func (h *countingResourceEventHandler) OnDelete(_ interface{}) { - h.lock.Lock() - defer h.lock.Unlock() - h.deleteEventCount++ -} - -func (h *countingResourceEventHandler) OnUpdate(_ interface{}, _ interface{}) { - h.lock.Lock() - defer h.lock.Unlock() - h.updateEventCount++ -} - -var _ cache.ResourceEventHandler = 
&countingResourceEventHandler{} - -func getDeployment(name, namespace string) *unstructured.Unstructured { - dp := &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - - dpMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dp) - if err != nil { - panic(err) - } - - return &unstructured.Unstructured{Object: dpMap} -} - -func getConfigMap(name, namespace string) *unstructured.Unstructured { - cm := &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - - cmMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cm) - if err != nil { - panic(err) - } - - return &unstructured.Unstructured{Object: cmMap} -} - -func getSecret(name, namespace string) *unstructured.Unstructured { - secret := &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - - secretMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(secret) - if err != nil { - panic(err) - } - - return &unstructured.Unstructured{Object: secretMap} -} - -func getDaemonSet(name, namespace string) *unstructured.Unstructured { - dm := &appsv1.DaemonSet{ - TypeMeta: metav1.TypeMeta{ - Kind: "DaemonSet", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - - dmMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dm) - if err != nil { - panic(err) - } - - return &unstructured.Unstructured{Object: dmMap} -} - -var ( - daemonsetFTC = &fedcorev1a1.FederatedTypeConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "daemonsets", - }, - Spec: fedcorev1a1.FederatedTypeConfigSpec{ - SourceType: fedcorev1a1.APIResource{ - Group: "apps", - Version: "v1", - Kind: "DaemonSet", - PluralName: "daemonsets", - Scope: v1beta1.NamespaceScoped, - }, - }, - } - deploymentFTC = &fedcorev1a1.FederatedTypeConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "deployments", - }, - Spec: fedcorev1a1.FederatedTypeConfigSpec{ - SourceType: fedcorev1a1.APIResource{ - Group: "apps", - Version: "v1", - Kind: "Deployment", - PluralName: "deployments", - Scope: v1beta1.NamespaceScoped, - }, - }, - } - configmapFTC = &fedcorev1a1.FederatedTypeConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "configmaps", - }, - Spec: fedcorev1a1.FederatedTypeConfigSpec{ - SourceType: fedcorev1a1.APIResource{ - Group: "", - Version: "v1", - Kind: "ConfigMap", - PluralName: "configmaps", - Scope: v1beta1.NamespaceScoped, - }, - }, - } - secretFTC = &fedcorev1a1.FederatedTypeConfig{ - - ObjectMeta: metav1.ObjectMeta{ - Name: "secrets", - }, - Spec: fedcorev1a1.FederatedTypeConfigSpec{ - SourceType: fedcorev1a1.APIResource{ - Group: "", - Version: "v1", - Kind: "Secret", - PluralName: "secrets", - Scope: v1beta1.NamespaceScoped, - }, - }, - } -) diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index 61342cc1..cd68ee06 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -31,8 +31,8 @@ type EventHandlerGenerator struct { // objects based on FederatedTypeConfigs. InformerManager will listen to FTC events and maintain informers for the // source type of each FTC. 
// -// Having multiple FTCs with the same source type is not supported and will cause InformerManager to behave incorrectly. -// Updating FTC source types is also not supported and will also cause InformerManager to behave incorrectly. +// Having multiple FTCs with the same source type is not supported and may cause InformerManager to behave incorrectly. +// Updating FTC source types is also not supported and may also cause InformerManager to behave incorrectly. type InformerManager interface { // Adds an EventHandler used to generate and register ResourceEventHandlers for each FTC's source type informer. AddEventHandlerGenerator(generator *EventHandlerGenerator) error @@ -66,8 +66,11 @@ type ClusterEventPredicate func(oldCluster, newCluster *fedcorev1a1.FederatedClu // access objects in member clusters based on FederatedTypeConfigs. FederatedInformerManager will listen to FTC events // and maintain informers for each FTC's source type and joined member cluster. // -// Having multiple FTCs with the same source type is not supported and will cause FederatedInformerManager to behave -// incorrectly. Updating FTC source types is also not supported and will also cause InformerManager to behave +// Having multiple FTCs with the same source type is not supported and may cause FederatedInformerManager to behave +// incorrectly. Updating FTC source types is also not supported and may also cause FederatedInformerManager to behave +// incorrectly. +// +// Updating Cluster connection details is also not supported and may cause FederatedInformerManager to behave // incorrectly. type FederatedInformerManager interface { // Adds an EventHandler used to generate and register ResourceEventHandlers for each FTC's source type informer. @@ -96,4 +99,12 @@ type FederatedInformerManager interface { Start(ctx context.Context) } -type ClusterClientGetter func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) +// ClusterClientGetter is used by the FederatedInformerManager to create clients for joined member clusters. +type ClusterClientGetter struct { + // ConnectionHash should return a string that uniquely identifies the combination of parameters used to generate the + // cluster client. A change in the connection hash indicates a need to create a new client for a given member + // cluster. + ConnectionHash func(cluster *fedcorev1a1.FederatedCluster) string + // ClientGetter returns a dynamic client for the given member cluster.
+ ClientGetter func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) +} diff --git a/pkg/util/informermanager/testutils.go b/pkg/util/informermanager/testutils.go new file mode 100644 index 00000000..925fb161 --- /dev/null +++ b/pkg/util/informermanager/testutils.go @@ -0,0 +1,216 @@ +package informermanager + +import ( + "sync" + + corev1 "k8s.io/api/core/v1" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" +) + +var ( + daemonsetFTC = &fedcorev1a1.FederatedTypeConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "daemonsets", + }, + Spec: fedcorev1a1.FederatedTypeConfigSpec{ + SourceType: fedcorev1a1.APIResource{ + Group: "apps", + Version: "v1", + Kind: "DaemonSet", + PluralName: "daemonsets", + Scope: v1beta1.NamespaceScoped, + }, + }, + } + deploymentFTC = &fedcorev1a1.FederatedTypeConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployments", + }, + Spec: fedcorev1a1.FederatedTypeConfigSpec{ + SourceType: fedcorev1a1.APIResource{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + PluralName: "deployments", + Scope: v1beta1.NamespaceScoped, + }, + }, + } + configmapFTC = &fedcorev1a1.FederatedTypeConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "configmaps", + }, + Spec: fedcorev1a1.FederatedTypeConfigSpec{ + SourceType: fedcorev1a1.APIResource{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + PluralName: "configmaps", + Scope: v1beta1.NamespaceScoped, + }, + }, + } + secretFTC = &fedcorev1a1.FederatedTypeConfig{ + + ObjectMeta: metav1.ObjectMeta{ + Name: "secrets", + }, + Spec: fedcorev1a1.FederatedTypeConfigSpec{ + SourceType: fedcorev1a1.APIResource{ + Group: "", + Version: "v1", + Kind: "Secret", + PluralName: "secrets", + Scope: v1beta1.NamespaceScoped, + }, + }, + } +) + +type countingResourceEventHandler struct { + lock sync.RWMutex + + generateCount int + + addEventCount int + updateEventCount int + deleteEventCount int +} + +func (h *countingResourceEventHandler) getAddEventCount() int { + h.lock.RLock() + defer h.lock.RUnlock() + return h.addEventCount +} + +func (h *countingResourceEventHandler) getUpdateEventCount() int { + h.lock.RLock() + defer h.lock.RUnlock() + return h.updateEventCount +} + +func (h *countingResourceEventHandler) getDeleteEventCount() int { + h.lock.RLock() + defer h.lock.RUnlock() + return h.deleteEventCount +} + +func (h *countingResourceEventHandler) getGenerateCount() int { + h.lock.RLock() + defer h.lock.RUnlock() + return h.generateCount +} + +func (h *countingResourceEventHandler) generateEventHandler(_ *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + h.lock.Lock() + defer h.lock.Unlock() + h.generateCount++ + return h +} + +func (h *countingResourceEventHandler) OnAdd(_ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.addEventCount++ +} + +func (h *countingResourceEventHandler) OnDelete(_ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.deleteEventCount++ +} + +func (h *countingResourceEventHandler) OnUpdate(_ interface{}, _ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.updateEventCount++ +} + +var _ cache.ResourceEventHandler = &countingResourceEventHandler{} + +func getDeployment(name, namespace string) *unstructured.Unstructured { + dp := &appsv1.Deployment{ + TypeMeta: 
metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + dpMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dp) + if err != nil { + panic(err) + } + + return &unstructured.Unstructured{Object: dpMap} +} + +func getConfigMap(name, namespace string) *unstructured.Unstructured { + cm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + cmMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cm) + if err != nil { + panic(err) + } + + return &unstructured.Unstructured{Object: cmMap} +} + +func getSecret(name, namespace string) *unstructured.Unstructured { + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + secretMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(secret) + if err != nil { + panic(err) + } + + return &unstructured.Unstructured{Object: secretMap} +} + +func getDaemonSet(name, namespace string) *unstructured.Unstructured { + dm := &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + dmMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dm) + if err != nil { + panic(err) + } + + return &unstructured.Unstructured{Object: dmMap} +} From fc7cfbcb34afda0e53f10b398a90d5bfa5ed7df5 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 7 Jul 2023 18:59:10 +0800 Subject: [PATCH 013/173] change api of EventHandlerGenerators --- .../federatedinformermanager_test.go | 52 ++++++++++++++++++- pkg/util/informermanager/informermanager.go | 41 +++++++++------ pkg/util/informermanager/interface.go | 16 +++--- 3 files changed, 81 insertions(+), 28 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index 8633bcd9..7982b899 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -1,6 +1,21 @@ package informermanager -import "testing" +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + dynamicclient "k8s.io/client-go/dynamic" + dynamicfake "k8s.io/client-go/dynamic/fake" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/fake" + fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" +) func TestFederatedInformerManagerClientAvailableForExistingClusters(t *testing.T) { @@ -61,3 +76,38 @@ func TestFederatedInformerManagerClusterEventHandlerOnClusterUpdate(t *testing.T func TestFederatedInformerManagerClusterEventHandlerOnClusterDelete(t *testing.T) { } + +func boostrapFederatedInformerManagerWithFakeClients( + clusters []*fedcorev1a1.FederatedCluster, + ftcs []*fedcorev1a1.FederatedTypeConfig, + objects []*unstructured.Unstructured, +) (FederatedInformerManager, dynamicclient.Interface, fedclient.Interface) { + scheme := runtime.NewScheme() + + corev1.AddToScheme(scheme) + 
appsv1.AddToScheme(scheme) + fedcorev1a1.AddToScheme(scheme) + + dynamicObjects := []runtime.Object{} + for _, object := range objects { + dynamicObjects = append(dynamicObjects, runtime.Object(object)) + } + dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects...) + + fedObjects := []runtime.Object{} + for _, ftc := range ftcs { + fedObjects = append(fedObjects, runtime.Object(ftc)) + } + fedClient := fake.NewSimpleClientset(fedObjects...) + + factory := fedinformers.NewSharedInformerFactory(fedClient, 0) + informerManager := NewFederatedInformerManager( + nil, + factory.Core().V1alpha1().FederatedTypeConfigs(), + factory.Core().V1alpha1().FederatedClusters(), + ) + + factory.Start(context.TODO().Done()) + + return informerManager, dynamicClient, fedClient +} diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 55985e60..5549851f 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -38,6 +38,7 @@ type informerManager struct { informers map[string]informers.GenericInformer informerStopChs map[string]chan struct{} eventHandlerRegistrations map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration + lastAppliedFTCsCache map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig queue workqueue.Interface logger klog.Logger @@ -54,6 +55,7 @@ func NewInformerManager(client dynamic.Interface, ftcInformer fedcorev1a1informe informers: map[string]informers.GenericInformer{}, informerStopChs: map[string]chan struct{}{}, eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, + lastAppliedFTCsCache: map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{}, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), logger: klog.LoggerWithName(klog.Background(), "informer-manager"), } @@ -157,29 +159,34 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err m.informers[ftcName] = informer m.informerStopChs[ftcName] = stopCh m.eventHandlerRegistrations[ftcName] = map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{} + m.lastAppliedFTCsCache[ftcName] = map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{} } registrations := m.eventHandlerRegistrations[ftcName] + lastAppliedFTCs := m.lastAppliedFTCsCache[ftcName] + + ftc = ftc.DeepCopy() for _, generator := range m.eventHandlerGenerators { - shouldRegister := generator.Predicate(ftc) - oldRegistration, oldRegistrationExists := registrations[generator] - - switch { - case !shouldRegister && oldRegistrationExists: - if err := informer.Informer().RemoveEventHandler(oldRegistration); err != nil { - return fmt.Errorf("failed to unregister event handler: %w", err), true - } - delete(registrations, generator) - - case shouldRegister && !oldRegistrationExists: - handler := generator.Generator(ftc) - newRegistration, err := informer.Informer().AddEventHandler(handler) - if err != nil { - return fmt.Errorf("failed to register event handler: %w", err), true - } - registrations[generator] = newRegistration + lastApplied := lastAppliedFTCs[generator] + if !generator.Predicate(lastApplied, ftc) { + continue + } + + oldRegistration := registrations[generator] + if err := informer.Informer().RemoveEventHandler(oldRegistration); err != nil { + return fmt.Errorf("failed to unregister event handler: %w", err), true + } + delete(registrations, generator) + + handler := 
generator.Generator(ftc) + newRegistration, err := informer.Informer().AddEventHandler(handler) + if err != nil { + delete(lastAppliedFTCs, generator) + return fmt.Errorf("failed to register event handler: %w", err), true } + registrations[generator] = newRegistration + lastAppliedFTCs[generator] = ftc } return nil, false diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index cd68ee06..d67e589a 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -14,16 +14,12 @@ import ( // EventHandlerGenerator is used by InformerManger and FederatedInformerManager to generate and register // ResourceEventHandlers for each FTC's source type informer. type EventHandlerGenerator struct { - // Predicate is called each time a FTC is reconciled to determine if a event handler needs to be registered for this - // EventHandlerGenerator. If Predicate returns false, any previously registered event handler for this - // EventHandlerGenerator will also be unregistered. - // - // Note: updating of event handlers is intentionally unsupported as registering a new event handler would cause all - // existing objects in the cache to be sent to it as add events, potentially causing performance problems. In other - // words, if Predicate returns true and there is already a registered event handler for this EventHandlerGenerator, - // a new event handler will not be generated. - Predicate func(ftc *fedcorev1a1.FederatedTypeConfig) bool - // Generator is used to generate a ResourceEventHandler for the given FTC. Generator MUST not return nil. + // Predicate is called each time a FTC is reconciled to determine if a new event handler needs to be generated and + // registered for this EventHandlerGenerator. If Predicate returns true, any previously registered event handler + // for this EventHandlerGenerator will also be unregistered. + Predicate func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool + // Generator is used to generate a ResourceEventHandler for the given FTC. If nil is returned, no event handler will + // be registered. 
Generator func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler } From a91764059d59ebc10d6ef958ba465e900ddbed38 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 7 Jul 2023 19:08:22 +0800 Subject: [PATCH 014/173] fix bug --- pkg/util/informermanager/informermanager.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 5549851f..484bd4c8 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -178,14 +178,16 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err return fmt.Errorf("failed to unregister event handler: %w", err), true } delete(registrations, generator) - - handler := generator.Generator(ftc) - newRegistration, err := informer.Informer().AddEventHandler(handler) - if err != nil { - delete(lastAppliedFTCs, generator) - return fmt.Errorf("failed to register event handler: %w", err), true + delete(lastAppliedFTCs, generator) + + if handler := generator.Generator(ftc); handler != nil { + newRegistration, err := informer.Informer().AddEventHandler(handler) + if err != nil { + return fmt.Errorf("failed to register event handler: %w", err), true + } + registrations[generator] = newRegistration } - registrations[generator] = newRegistration + lastAppliedFTCs[generator] = ftc } From 0e9f2ba60704da9d2626c53f11d7f44c25462715 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Mon, 10 Jul 2023 13:02:50 +0800 Subject: [PATCH 015/173] finish informermanager tests --- .../federatedinformermanager_test.go | 4 +- pkg/util/informermanager/informermanager.go | 10 +- .../informermanager/informermanager_test.go | 374 ++++++++++++++---- 3 files changed, 302 insertions(+), 86 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index 7982b899..28787c19 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -80,7 +80,7 @@ func TestFederatedInformerManagerClusterEventHandlerOnClusterDelete(t *testing.T func boostrapFederatedInformerManagerWithFakeClients( clusters []*fedcorev1a1.FederatedCluster, ftcs []*fedcorev1a1.FederatedTypeConfig, - objects []*unstructured.Unstructured, + objects map[string]*unstructured.Unstructured, ) (FederatedInformerManager, dynamicclient.Interface, fedclient.Interface) { scheme := runtime.NewScheme() @@ -102,7 +102,7 @@ func boostrapFederatedInformerManagerWithFakeClients( factory := fedinformers.NewSharedInformerFactory(fedClient, 0) informerManager := NewFederatedInformerManager( - nil, + ClusterClientGetter{}, factory.Core().V1alpha1().FederatedTypeConfigs(), factory.Core().V1alpha1().FederatedClusters(), ) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 484bd4c8..ab54a99b 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -170,14 +170,16 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err for _, generator := range m.eventHandlerGenerators { lastApplied := lastAppliedFTCs[generator] if !generator.Predicate(lastApplied, ftc) { + lastAppliedFTCs[generator] = ftc continue } - oldRegistration := registrations[generator] - if err := informer.Informer().RemoveEventHandler(oldRegistration); err != nil { - return fmt.Errorf("failed to 
unregister event handler: %w", err), true + if oldRegistration := registrations[generator]; oldRegistration != nil { + if err := informer.Informer().RemoveEventHandler(oldRegistration); err != nil { + return fmt.Errorf("failed to unregister event handler: %w", err), true + } + delete(registrations, generator) } - delete(registrations, generator) delete(lastAppliedFTCs, generator) if handler := generator.Generator(ftc); handler != nil { diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 46ee615b..12fedff4 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -2,6 +2,8 @@ package informermanager import ( "context" + "math/rand" + "strconv" "testing" "time" @@ -74,7 +76,7 @@ func TestInformerManagerListerAvailableForExistingFTCs(t *testing.T) { } // Verifies that the listers for the SourceType of FTCs created after the InformerManager is started eventually becomes -// avialable. +// available. func TestInformerManagerListerAvailableForNewFTC(t *testing.T) { g := gomega.NewGomegaWithT(t) @@ -99,7 +101,6 @@ func TestInformerManagerListerAvailableForNewFTC(t *testing.T) { apiresource := ftc.GetSourceType() gvr := schemautil.APIResourceToGVR(&apiresource) - // 4. Santiy check: verify that the lister for daemonsets is initially not available lister, informerSynced, exists := manager.GetResourceLister(gvr) @@ -129,7 +130,7 @@ func TestInformerManagerListerAvailableForNewFTC(t *testing.T) { g.Expect(informerSynced).To(gomega.BeNil()) } -// Verifies that event handlers from EventHandlerGenerators are properly registered for existing FTCs after the +// Verifies that event handlers from EventHandlerGenerators are registered for existing FTCs after the // InformerManager is started. func TestInformerManagerEventHandlerRegistrationForExistingFTCs(t *testing.T) { g := gomega.NewGomegaWithT(t) @@ -153,11 +154,11 @@ func TestInformerManagerEventHandlerRegistrationForExistingFTCs(t *testing.T) { unregisteredResourceEventHandler := &countingResourceEventHandler{} manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, Generator: registeredResourceEventHandler.generateEventHandler, }) manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return false }, + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return false }, Generator: unregisteredResourceEventHandler.generateEventHandler, }) @@ -225,7 +226,7 @@ func TestInformerManagerEventHandlerRegistrationForExistingFTCs(t *testing.T) { }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) } -// Verifies that event handlers from EventHandlerGenerators are properly registered for new FTCs created after the +// Verifies that event handlers from EventHandlerGenerators are registered for new FTCs created after the // InformerManager is started. 
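For consumers, the two-argument Predicate makes it cheap to re-register a handler only when a relevant part of the FTC actually changed, rather than on every reconcile. A sketch of such a generator; the example.io/revision annotation key is invented for illustration, and the surrounding imports (cache, fedcorev1a1) are assumed to be in scope:

```go
manager.AddEventHandlerGenerator(&EventHandlerGenerator{
	// Register on first sight (lastApplied == nil), then re-register only when
	// the annotation we care about differs between lastApplied and latest.
	Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool {
		if lastApplied == nil {
			return true
		}
		return lastApplied.GetAnnotations()["example.io/revision"] !=
			latest.GetAnnotations()["example.io/revision"]
	},
	Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler {
		return cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* enqueue obj for reconciliation */ },
			UpdateFunc: func(oldObj, newObj interface{}) { /* enqueue newObj */ },
			DeleteFunc: func(obj interface{}) { /* enqueue obj */ },
		}
	},
})
```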
func TestInformerManagerEventHandlerRegistrationForNewFTC(t *testing.T) { g := gomega.NewGomegaWithT(t) @@ -248,11 +249,11 @@ func TestInformerManagerEventHandlerRegistrationForNewFTC(t *testing.T) { unregisteredResourceEventHandler := &countingResourceEventHandler{} manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, Generator: registeredResourceEventHandler.generateEventHandler, }) manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return false }, + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return false }, Generator: unregisteredResourceEventHandler.generateEventHandler, }) @@ -321,37 +322,100 @@ func TestInformerManagerEventHandlerRegistrationForNewFTC(t *testing.T) { }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) } -// Verifies that event handlers from EventHandlerGenerator are unregistered and registered according to its Predicate -// when an FTC is updated. -func TestInformerManagerEventHandlerRegistrationOnFTCUpdate(t *testing.T) { +// Verifies that the EventHandlerGenerators receive the correct lastApplied and latest FTCs. +func TestInformerManagerEventHandlerGeneratorsReceiveCorrectFTCs(t *testing.T) { g := gomega.NewGomegaWithT(t) // 1. Bootstrap an environemnt with a single Deployment FTC. + generation := 1 + + ftc := deploymentFTC.DeepCopy() + ftc.SetAnnotations(map[string]string{"generation": strconv.Itoa(generation)}) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + + manager, _, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, []*unstructured.Unstructured{}) + + // 2. Add EventHandlerGenerators to the InformerManager, the EventHandlerGenerator verifies that the "generation" + // annotation matches the generation variable + + // lock is used to ensure that the FTC events are not squashed by the InformerManager and that each event is + // processed. + lock := make(chan struct{}) + + eventHandler := &countingResourceEventHandler{} + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { + switch { + case generation == 1: + // if newly created, expect lastApplied to be nil + g.Expect(lastApplied).To(gomega.BeNil()) + default: + g.Expect(strconv.Atoi(lastApplied.GetAnnotations()["generation"])).To(gomega.BeNumerically("==", generation-1)) + g.Expect(strconv.Atoi(latest.GetAnnotations()["generation"])).To(gomega.BeNumerically("==", generation)) + } + + <-lock + + return true + }, + Generator: eventHandler.generateEventHandler, + }) + + // 3. Start InformerManager. + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + lock <- struct{}{} + + // 4. 
Trigger FTC updates + + for i := 0; i < 5; i++ { + generation++ + ftc.SetAnnotations(map[string]string{"generation": strconv.Itoa(generation)}) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + lock <- struct{}{} + } +} + +// Verifies that the event handler from EventHandlerGenerator is generated and registered on a corresponding FTC update +// where Predicate returns true and Generator returns an event handler. +func TestInformerManagerEventHandlerRegisteredOnFTCUpdate(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + // 1. Bootstrap an environemnt with a single Deployment FTC and a single deployment. + + dp1 := getDeployment("dp-1", "default") + ftc := deploymentFTC.DeepCopy() ftc.SetAnnotations(map[string]string{"predicate": "false"}) defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} - manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, []*unstructured.Unstructured{}) + defaultObjects := []*unstructured.Unstructured{dp1} + manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) - // 2. Add EventHandlerGenerators to the InformerManager. eventHandler should be registered for FTCs based on the - // "predicate" annotation found on FTC object. If this annotation exists and its value == "true", the eventHandler - // SHOULD be registered. - // - // Because the deployment FTC was created with "predicate" annotation value as "false", eventHandler SHOULD NOT be - // registered at the start. + // 2. Add EventHandlerGenerators to the InformerManager, eventHandler SHOULD be generated when the "predicate" + // annotation of the FTC is "true". eventHandler := &countingResourceEventHandler{} - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { - anno := ftc.GetAnnotations() - return anno != nil && anno["predicate"] == "true" + Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { + annotations := latest.GetAnnotations() + return annotations["predicate"] == "true" }, Generator: eventHandler.generateEventHandler, }) - // 3. Start the InformerManager. + // 3. Start InformerManager. ctx := context.Background() manager.Start(ctx) @@ -362,8 +426,7 @@ func TestInformerManagerEventHandlerRegistrationOnFTCUpdate(t *testing.T) { g.Fail("Timed out waiting for InformerManager cache sync") } - // 4. Verify that eventHandler IS NOT generated or registered. This is because the deployment FTC was created with - // "predicate" annotation value = "false". + // 4. Sanity check: eventHandler should not be registered initially g.Consistently(func(g gomega.Gomega) { g.Expect(eventHandler.getGenerateCount()).To(gomega.BeZero()) @@ -372,123 +435,274 @@ func TestInformerManagerEventHandlerRegistrationOnFTCUpdate(t *testing.T) { g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - // 5a. Update the FTC so that the "predicate" annotation == "true". + // 5. Trigger a registration by updating the "predicate" annotation to "true" ftc.SetAnnotations(map[string]string{"predicate": "true"}) ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 5b. Sleep for a second to allow the InformerManager time to process the FTC update. + // 6. 
Generate events for deployments - <-time.After(time.Second) + // +1 add event - // 5c. Verify that eventHandler is eventually registered and it receives any deployment events. + dp2 := getDeployment("dp-2", "default") + dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - // 5d. Generate +1 add event for deployments + // +1 update event - dp1 := getDeployment("dp-1", "default") - dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp1, metav1.CreateOptions{}) + dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) + dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // +1 delete event + + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) + // 7. Verify that events are eventually received by eventHandler + g.Eventually(func(g gomega.Gomega) { g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 2)) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) +} - // 6a. Update the FTC such that the "predicate" annotation remains == "true" +// Verifies that the event handler from EventHandlerGenerator is unregistered on a corresponding FTC update +// where Predicate returns true and Generator returns a nil event handler. +func TestInformerManagerEventHandlerUnregisteredOnFTCUpdate(t *testing.T) { + g := gomega.NewGomegaWithT(t) - ftc.SetAnnotations(map[string]string{"predicate": "true", "update-trigger": "1"}) - ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 1. Bootstrap an environemnt with a single Deployment FTC and a single deployment. - // 6b. Sleep for a second to allow the InformerManager to process the FTC update + dp1 := getDeployment("dp-1", "default") - <-time.After(time.Second) + ftc := deploymentFTC.DeepCopy() + ftc.SetAnnotations(map[string]string{"generator": "true"}) - // 6c. Verify that the generate function is NOT called (a new event handler was not generated) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjects := []*unstructured.Unstructured{dp1} + manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) - g.Consistently(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + // 2. Add EventHandlerGenerators to the InformerManager, Predicate always returns true and Generator returns + // eventHandler if the "generator" annotation == "true". Otherwise Generator returns nil. - // 6d. Verify that eventHandler was NOT re-registered and additional events continue to be received. 
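As I read the reconciliation loop patched in above, each EventHandlerGenerator now behaves as follows on every FTC reconcile (a summary, not authoritative):

```go
// Predicate(lastApplied, latest) == false  -> existing registration, if any, is kept as-is
// Predicate == true, Generator(ftc) == nil -> existing registration is removed; nothing new is registered
// Predicate == true, Generator(ftc) != nil -> existing registration is removed and the new handler is registered
//
// In every non-error case, the generator's lastApplied entry is updated to the
// latest FTC, so the next reconcile compares against what was actually applied.
```

The test below exercises the middle row: a Predicate that always returns true combined with a Generator that returns nil on demand.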
+ eventHandler := &countingResourceEventHandler{} + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { + return true + }, + Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + annotations := ftc.GetAnnotations() + if annotations["generator"] == "true" { + return eventHandler.generateEventHandler(ftc) + } + return nil + }, + }) - // 6e. Generate +1 update event for deployments. + // 3. Start InformerManager. - dp1.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + // 4. Sanity check: eventHandler should be registered initially g.Eventually(func(g gomega.Gomega) { g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - // Add events should stay at 1 since eventHandler was not re-registered (registering will cause it to receive - // synthentic add events for all objects in the cache). g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - // 7a. Update the FTC such that "predicate" annotation becomes "false". + // 5. Trigger an unregistration by updating the "predicate" annotation to "false" - ftc.SetAnnotations(map[string]string{"predicate": "false"}) - ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + ftc.SetAnnotations(map[string]string{"generator": "false"}) + ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 7b. Sleep for a second to allow the InformerManager to process the FTC update. - + // 6. Sleep for a second to allow the InformerManager to process the FTC update. <-time.After(time.Second) - // 7c. Verify that events are no longer received by eventHandler since it should be unregistered. + // 7. Generate events for deployments - // 7d. Generate +1 add event for deployments. + // +1 add event dp2 := getDeployment("dp-2", "default") dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 7e. Generate +1 update event for deployments. + // +1 update event dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 7f. Generate +1 delete event for deployments. 
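The synthetic-add replay mentioned above is standard client-go behaviour and is worth seeing in isolation: adding a handler to an already-synced informer replays every object in the store as an add event. A self-contained sketch using client-go's fake clientset (not kubeadmiral code):

```go
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "existing", Namespace: "default"}}
	factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(pod), 0)
	informer := factory.Core().V1().Pods().Informer()

	factory.Start(ctx.Done())
	cache.WaitForCacheSync(ctx.Done(), informer.HasSynced)

	// Registering after sync still prints "add: existing": the store is replayed
	// to the new handler even though no new pod was created. This is why
	// re-registering handlers is never free.
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("add:", obj.(*corev1.Pod).Name) },
	})

	time.Sleep(time.Second) // give the handler time to fire before exiting
}
```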
+ // +1 delete event - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp1.GetName(), metav1.DeleteOptions{}) + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) + // 8. Verify that events are no longer received by eventHandler. + g.Consistently(func(g gomega.Gomega) { g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) +} - // 8a. Update the FTC such that predicate remains == "false". +// Verifies that the event handler from an EventHandlerGenerator is regenerated and registered on a corresponding FTC +// update where Predicate returns true and Generator returns an event handler. +func TestInformerManagerEventHandlerReregisteredFTCUpdate(t *testing.T) { + g := gomega.NewGomegaWithT(t) - ftc.SetAnnotations(map[string]string{"predicate": "false", "update-trigger": "1"}) - ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 1. Bootstrap an environemnt with a single Deployment FTC and a single deployment. - // 8b. Sleep for a second to allow the InformerManager to process the ftc update. + ftc := deploymentFTC.DeepCopy() + dp1 := getDeployment("dp-1", "default") - <-time.After(time.Second) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjects := []*unstructured.Unstructured{dp1} + manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) - // 8c. Verify that events are still not received by eventHandler since it should remain unregistered. + // 2. Add EventHandlerGenerators to the InformerManager, eventHandler SHOULD always be regenerated and registered. - // 8d. Generate +1 delete event for deplyoments. + eventHandler := &countingResourceEventHandler{} + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { + return true + }, + Generator: eventHandler.generateEventHandler, + }) - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 3. Start InformerManager. - g.Consistently(func(g gomega.Gomega) { + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + // 4. Verify that eventHandler is generated and registered initially. 
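Stepping back from the individual cases: all of these tests drive informer events through a fake dynamic client, and the plumbing (see boostrapInformerManagerWithFakeClients) only works because the concrete types are registered on the scheme first, which is what lets the fake tracker serve list/watch for those GVRs. A condensed sketch of that bootstrap, with dp1/cm1/sc1 assumed to be the unstructured fixtures built by getDeployment and friends:

```go
scheme := runtime.NewScheme()
_ = corev1.AddToScheme(scheme) // configmaps, secrets
_ = appsv1.AddToScheme(scheme) // deployments, daemonsets
_ = fedcorev1a1.AddToScheme(scheme)

// Objects seeded here are immediately listable/watchable by informers.
dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, dp1, cm1, sc1)
```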
+ + g.Eventually(func(g gomega.Gomega) { g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // 5. Trigger FTC updates + + for i := 0; i < 5; i++ { + ftc.SetAnnotations(map[string]string{"trigger": strconv.Itoa(rand.Intn(1000))}) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 5a. Generate deployment events as well + + // +1 Update event + + dp1.SetAnnotations(map[string]string{"trigger": strconv.Itoa(rand.Intn(1000))}) + dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 5b. Verify eventHandler is regenerated and registered + + g.Eventually(func(g gomega.Gomega) { + g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1+i)) + g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", i)) + g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }) + } +} + +// Verifies that the event handler from an EventHandlerGenerator is unchanged after a corresponding FTC update where +// Predicate returns false and an event handler did not exist previously. +func TestInformerManagerEventHandlerUnchangedFTCUpdate(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + // 1. Bootstrap an environemnt with a single Deployment FTC and a single deployment. + + ftc := deploymentFTC.DeepCopy() + dp1 := getDeployment("dp-1", "default") + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjects := []*unstructured.Unstructured{dp1} + manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) + + // 2. Add EventHandlerGenerators to the InformerManager, eventHandler SHOULD only be created once per new FTC and + // never be regenerated. + + eventHandler := &countingResourceEventHandler{} + manager.AddEventHandlerGenerator(&EventHandlerGenerator{ + Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { + if lastApplied == nil { + return true + } + return false + }, + Generator: eventHandler.generateEventHandler, + }) + + // 3. Start InformerManager. + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + + // 4. Verify that eventHandler is generated and registered initially. + + g.Eventually(func(g gomega.Gomega) { + g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) + g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // 5. 
Trigger FTC updates + + for i := 0; i < 5; i++ { + ftc.SetAnnotations(map[string]string{"trigger": strconv.Itoa(rand.Intn(1000))}) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 5a. Generate deployment events as well + + // +1 Update event + + dp1.SetAnnotations(map[string]string{"trigger": strconv.Itoa(rand.Intn(1000))}) + dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 5b. Verify eventHandler is not regenerated but continues to be registered. + + g.Eventually(func(g gomega.Gomega) { + g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) + g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", i)) + g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) + }) + } } // Verifies that event handlers from EventHandlerGenerators are unregistered when a FTC is deleted. @@ -513,11 +727,11 @@ func TestInformerManagerEventHandlerRegistrationOnFTCDelete(t *testing.T) { eventHandler2 := &countingResourceEventHandler{} manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, Generator: eventHandler1.generateEventHandler, }) manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, Generator: eventHandler2.generateEventHandler, }) @@ -636,11 +850,11 @@ func TestInformerManagerEventHandlerRegistrationOnShutdown(t *testing.T) { eventHandler2 := &countingResourceEventHandler{} manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, Generator: eventHandler1.generateEventHandler, }) manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(ftc *fedcorev1a1.FederatedTypeConfig) bool { return true }, + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, Generator: eventHandler2.generateEventHandler, }) From eedebb5e4ae645ac1fcb939ae0227a6159e614b4 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Mon, 10 Jul 2023 18:09:21 +0800 Subject: [PATCH 016/173] refactor unit tests for informermanager --- .../federatedinformermanager.go | 33 +- .../federatedinformermanager_test.go | 180 ++- .../informermanager/informermanager_test.go | 1173 +++++++---------- pkg/util/informermanager/interface.go | 2 +- .../{testutils.go => testutils_test.go} | 212 ++- 5 files changed, 793 insertions(+), 807 deletions(-) rename pkg/util/informermanager/{testutils.go => testutils_test.go} (52%) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 60d7c66b..51e6dd72 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -1,7 +1,9 @@ package informermanager import ( + "bytes" "context" + "encoding/gob" "fmt" "sync" @@ -32,7 +34,7 @@ type federatedInformerManager struct { 
clusterEventHandler []*ClusterEventHandler clients map[string]dynamic.Interface - connectionMap map[string]string + connectionMap map[string][]byte informerManagers map[string]InformerManager informerManagersCancelFuncs map[string]context.CancelFunc @@ -54,7 +56,7 @@ func NewFederatedInformerManager( eventHandlerGenerators: []*EventHandlerGenerator{}, clusterEventHandler: []*ClusterEventHandler{}, clients: map[string]dynamic.Interface{}, - connectionMap: map[string]string{}, + connectionMap: map[string][]byte{}, informerManagers: map[string]InformerManager{}, informerManagersCancelFuncs: map[string]context.CancelFunc{}, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), @@ -132,9 +134,12 @@ func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.Federated clusterName := cluster.Name - connectionHash := m.clientGetter.ConnectionHash(cluster) + connectionHash, err := m.clientGetter.ConnectionHash(cluster) + if err != nil { + return fmt.Errorf("failed to get connection hash for cluster %s: %w", clusterName, err), true + } if oldConnectionHash, exists := m.connectionMap[clusterName]; exists { - if oldConnectionHash != connectionHash { + if !bytes.Equal(oldConnectionHash, connectionHash) { // This might occur if a cluster was deleted and recreated with different connection details within a short // period of time and we missed processing the deletion. We simply process the cluster deletion and // reenqueue. @@ -145,7 +150,7 @@ func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.Federated } else { clusterClient, err := m.clientGetter.ClientGetter(cluster) if err != nil { - return fmt.Errorf("failed to get client for cluster %s: %s", clusterName, err), true + return fmt.Errorf("failed to get client for cluster %s: %w", clusterName, err), true } manager := NewInformerManager(clusterClient, m.ftcInformer) @@ -304,6 +309,20 @@ func (m *federatedInformerManager) Start(ctx context.Context) { var _ FederatedInformerManager = &federatedInformerManager{} -func getConnectionHash(cluster *fedcorev1a1.FederatedCluster) string { - panic("unimplemented") +func DefaultClusterConnectionHash(cluster *fedcorev1a1.FederatedCluster) ([]byte, error) { + hashObj := struct { + ApiEndpoint string + SecretName string + UseServiceAccountToken bool + }{ + ApiEndpoint: cluster.Spec.APIEndpoint, + SecretName: cluster.Spec.SecretRef.Name, + UseServiceAccountToken: cluster.Spec.UseServiceAccountToken, + } + + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(hashObj); err != nil { + return nil, err + } + return b.Bytes(), nil } diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index 28787c19..d19b44c7 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -3,34 +3,201 @@ package informermanager import ( "context" "testing" + "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" dynamicclient "k8s.io/client-go/dynamic" dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/tools/cache" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/fake" fedinformers 
"github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" + "github.com/onsi/gomega" ) +// Verifies that clients for existing clusters are eventually available after the FederatedInformerManager is started. func TestFederatedInformerManagerClientAvailableForExistingClusters(t *testing.T) { + g := gomega.NewGomegaWithT(t) + // 1. Bootstrap an environment with 3 clusters. + + cluster1 := getTestCluster("cluster-1") + cluster2 := getTestCluster("cluster-2") + cluster3 := getTestCluster("cluster-3") + + defaultClusters := []*fedcorev1a1.FederatedCluster{cluster1, cluster2, cluster3} + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} + defaultObjects := map[string]*unstructured.Unstructured{} + + manager, _, _ := boostrapFederatedInformerManagerWithFakeClients(defaultClusters, defaultFTCs, defaultObjects) + + // 2. Start the manager + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + + // 3. Verify that clients for the clusters are eventually available + + for _, cluster := range defaultClusters { + g.Eventually(func(g gomega.Gomega) { + client, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(client).ToNot(gomega.BeNil()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + } + + // 4. Sanity check: the client for a non-existent cluster should not be available + + client, exists := manager.GetClusterClient("cluster-4") + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(client).To(gomega.BeNil()) } +// Verifies that clients for new clusters created after the FederatedInformerManager is started are eventually +// available. func TestFederatedInformerManagerClientAvailableForNewCluster(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + // 1. Bootstrap an environment with no initial clusters. + + defaultClusters := []*fedcorev1a1.FederatedCluster{} + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} + defaultObjects := map[string]*unstructured.Unstructured{} + + manager, _, fedClient := boostrapFederatedInformerManagerWithFakeClients(defaultClusters, defaultFTCs, defaultObjects) + + // 2. Start the manager + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + + // 3. Sanity check: the client for a non-existent cluster should not be available + + client, exists := manager.GetClusterClient("cluster-1") + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(client).To(gomega.BeNil()) + + // 4. Create a new cluster + + cluster1 := getTestCluster("cluster-1") + fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, cluster1, metav1.CreateOptions{}) + + + // 5. 
Verify that clients for the clusters are eventually available + g.Eventually(func(g gomega.Gomega) { + client, exists := manager.GetClusterClient(cluster1.Name) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(client).ToNot(gomega.BeNil()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) } +// Verifies that the listers for the SourceType GVR of existing FTCs and member clusters are eventually available after +// the FederatedInformerManager is started. func TestFederatedInformerManagerListerAvailableForExistingFTCsAndClusters(t *testing.T) { + g := gomega.NewGomegaWithT(t) + // 1. Bootstrap an environment with 3 clusters and FTCs for deployments, configmaps and secrets. + + cluster1 := getTestCluster("cluster-1") + cluster2 := getTestCluster("cluster-2") + cluster3 := getTestCluster("cluster-3") + + defaultClusters := []*fedcorev1a1.FederatedCluster{cluster1, cluster2, cluster3} + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjects := map[string]*unstructured.Unstructured{} + + manager, _, _ := boostrapFederatedInformerManagerWithFakeClients(defaultClusters, defaultFTCs, defaultObjects) + + // 2. Start the manager + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + + // 3. Verify that listers for the clusters and each FTC's SourceType GVR are eventually available. + + for _, cluster := range defaultClusters { + for _, ftc := range defaultFTCs { + apiresource := ftc.GetSourceType() + gvr := schemautil.APIResourceToGVR(&apiresource) + + g.Eventually(func(g gomega.Gomega) { + lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(lister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + } + } + + // 4. Sanity check: the listers for a non-existent cluster and ftc should not be available + + lister, informerSynced, exists := manager.GetResourceLister(common.DaemonSetGVR, defaultClusters[0].Name) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(lister).To(gomega.BeNil()) + g.Expect(informerSynced).To(gomega.BeNil()) + + lister, informerSynced, exists = manager.GetResourceLister(common.DeploymentGVR, "cluster-4") + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(lister).To(gomega.BeNil()) + g.Expect(informerSynced).To(gomega.BeNil()) } +// Verifies that the lister for the SourceType GVR of a new FTC created after the FederatedInformerManager is started +// eventually becomes available. func TestFederatedInformerManagerListerAvailableForNewFTC(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + // 1. Bootstrap an environment with 3 clusters. + + cluster1 := getTestCluster("cluster-1") + cluster2 := getTestCluster("cluster-2") + cluster3 := getTestCluster("cluster-3") + + defaultClusters := []*fedcorev1a1.FederatedCluster{cluster1, cluster2, cluster3} + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} + defaultObjects := map[string]*unstructured.Unstructured{} + + manager, _, _ := boostrapFederatedInformerManagerWithFakeClients(defaultClusters, defaultFTCs, defaultObjects) + // 2. 
Start the manager + + ctx := context.Background() + manager.Start(ctx) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) + defer cancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + + // 3. Sanity check: } func TestFederatedInformerManagerListerAvailableForNewCluster(t *testing.T) { @@ -95,6 +262,9 @@ func boostrapFederatedInformerManagerWithFakeClients( dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects...) fedObjects := []runtime.Object{} + for _, cluster := range clusters { + fedObjects = append(fedObjects, runtime.Object(cluster)) + } for _, ftc := range ftcs { fedObjects = append(fedObjects, runtime.Object(ftc)) } @@ -102,11 +272,19 @@ func boostrapFederatedInformerManagerWithFakeClients( factory := fedinformers.NewSharedInformerFactory(fedClient, 0) informerManager := NewFederatedInformerManager( - ClusterClientGetter{}, + ClusterClientGetter{ + ConnectionHash: DefaultClusterConnectionHash, + ClientGetter: func(cluster *fedcorev1a1.FederatedCluster) (dynamicclient.Interface, error) { + return dynamicfake.NewSimpleDynamicClient(scheme), nil + }, + }, factory.Core().V1alpha1().FederatedTypeConfigs(), factory.Core().V1alpha1().FederatedClusters(), ) + // this is required for the factory to start the underlying ftc informer + factory.Core().V1alpha1().FederatedTypeConfigs().Informer() + factory.Start(context.TODO().Done()) return informerManager, dynamicClient, fedClient diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 12fedff4..ac6f4c0f 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -2,8 +2,6 @@ package informermanager import ( "context" - "math/rand" - "strconv" "testing" "time" @@ -14,7 +12,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" dynamicclient "k8s.io/client-go/dynamic" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/tools/cache" @@ -27,907 +24,606 @@ import ( schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) -// Verifies that the listers for the SourceType GVR of existing FTCs in the cluster are eventually available after the -// InformerManager is started. -func TestInformerManagerListerAvailableForExistingFTCs(t *testing.T) { - g := gomega.NewGomegaWithT(t) +func TestInformerManager(t *testing.T) { + g := gomega.NewWithT(t) - // 1. Bootstrap an environment with FTCs for deployments, configmaps and secrets. + t.Run("listers for existing FTCs should be available eventually", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} - manager, _, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, []*unstructured.Unstructured{}) + // 1. Bootstrap environment - // 2. Start the manager + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjs := []*unstructured.Unstructured{} + generators := []*EventHandlerGenerator{} + manager, _, _ := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - ctx := context.Background() - manager.Start(ctx) + // 2. 
Verify that the listers for each FTC is eventually available - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } - - // 3. Verify that the listers for each FTC's SourceType GVR is eventually available. - - for _, ftc := range defaultFTCs { - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) - - g.Eventually(func(g gomega.Gomega) { - lister, informerSynced, exists := manager.GetResourceLister(gvr) - g.Expect(exists).To(gomega.BeTrue()) - g.Expect(lister).ToNot(gomega.BeNil()) - g.Expect(informerSynced()).To(gomega.BeTrue()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - } - - // 4. Sanity check: the lister for a GVR without a corresponding FTC should not exist - - gvr := schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "daemonsets", - } - lister, informerSynced, exists := manager.GetResourceLister(gvr) - g.Expect(exists).To(gomega.BeFalse()) - g.Expect(lister).To(gomega.BeNil()) - g.Expect(informerSynced).To(gomega.BeNil()) -} - -// Verifies that the listers for the SourceType of FTCs created after the InformerManager is started eventually becomes -// available. -func TestInformerManagerListerAvailableForNewFTC(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // 1. Bootstrap an environment with no FTCs to begin with. - - manager, _, fedClient := boostrapInformerManagerWithFakeClients([]*fedcorev1a1.FederatedTypeConfig{}, []*unstructured.Unstructured{}) - - // 2. Start the InformerManager. + for _, ftc := range defaultFTCs { + apiresource := ftc.GetSourceType() + gvr := schemautil.APIResourceToGVR(&apiresource) - ctx := context.Background() - manager.Start(ctx) + g.Eventually(func(g gomega.Gomega) { + lister, informerSynced, exists := manager.GetResourceLister(gvr) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(lister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + } - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } - - // 3. Initialize daemonset FTC that will be created later. - - ftc := daemonsetFTC - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) - - // 4. Santiy check: verify that the lister for daemonsets is initially not available - - lister, informerSynced, exists := manager.GetResourceLister(gvr) - g.Expect(exists).To(gomega.BeFalse()) - g.Expect(lister).To(gomega.BeNil()) - g.Expect(informerSynced).To(gomega.BeNil()) - - // 5. Create the daemonset FTC. - - _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - // 6. Verify the the lister for the SourceType of the newly created daemonset FTC is eventually available. - - g.Eventually(func(g gomega.Gomega) { - lister, informerSynced, exists := manager.GetResourceLister(gvr) - g.Expect(exists).To(gomega.BeTrue()) - g.Expect(lister).ToNot(gomega.BeNil()) - g.Expect(informerSynced()).To(gomega.BeTrue()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - - // 7. Sanity check: the lister for a GVR without a corresponding FTC should not exist + // 3. 
Verify that the lister for a non-existent FTC is not available - lister, informerSynced, exists = manager.GetResourceLister(common.DeploymentGVR) - g.Expect(exists).To(gomega.BeFalse()) - g.Expect(lister).To(gomega.BeNil()) - g.Expect(informerSynced).To(gomega.BeNil()) -} - -// Verifies that event handlers from EventHandlerGenerators are registered for existing FTCs after the -// InformerManager is started. -func TestInformerManagerEventHandlerRegistrationForExistingFTCs(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // 1. Bootstrap an environment with FTCs for deplyoments, configmaps and secrets. Also create an existing - // deployment, configmap and secret. - - dp1 := getDeployment("dp-1", "default") - cm1 := getConfigMap("cm-1", "default") - sc1 := getSecret("sc-1", "default") - - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} - defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1} - manager, dynamicClient, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) - - // 2. Add EventHandlerGenerators to the InformerManager. registeredResourceEventHandler SHOULD be registered to ALL - // FTCs (based on its Predicate), unregisteredResourceEventHandler SHOULD NOT be registered for ANY FTCs (based on - // its Predicate). - - registeredResourceEventHandler := &countingResourceEventHandler{} - unregisteredResourceEventHandler := &countingResourceEventHandler{} - - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, - Generator: registeredResourceEventHandler.generateEventHandler, - }) - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return false }, - Generator: unregisteredResourceEventHandler.generateEventHandler, + lister, informerSynced, exists := manager.GetResourceLister(common.DaemonSetGVR) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(lister).To(gomega.BeNil()) + g.Expect(informerSynced).To(gomega.BeNil()) }) - // 3. Start the InformerManager. - - ctx := context.Background() - manager.Start(ctx) - - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } - - // 4. Verify that the registeredResourceEventHandler is eventually registered for ALL FTCs and that the add events - // for the existing objects are ALL RECEIVED. + t.Run("listers for new FTC should be available eventually", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - g.Eventually(func(g gomega.Gomega) { - // The generate function should be called once for each FTC. - g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - // The number of add events should be equal to the number of current existing objects. - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + // 1. Bootstrap environment - // 5. Verify that additional events continue to be received by registeredResourceEventHandler. 
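The lister subtests above pin down the (lister, informerSynced, exists) contract of GetResourceLister: callers must first check existence, then wait for the informer cache to sync before trusting list results. Below is a minimal consumer sketch under that contract; it assumes the returned lister is a cache.GenericLister, and the helper name listIfSynced is illustrative rather than part of this patch.

package informermanager

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// listIfSynced lists cached source objects for a GVR, failing fast when no FTC
// currently enables the GVR or when the informer has not completed its initial sync.
func listIfSynced(manager InformerManager, gvr schema.GroupVersionResource) ([]runtime.Object, error) {
	lister, informerSynced, exists := manager.GetResourceLister(gvr)
	if !exists {
		// No FederatedTypeConfig declares this GVR as its source type.
		return nil, fmt.Errorf("no informer for %s", gvr.String())
	}
	if !informerSynced() {
		// The informer exists but its cache is not yet authoritative.
		return nil, fmt.Errorf("informer for %s not synced", gvr.String())
	}
	return lister.List(labels.Everything())
}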
+ defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} + defaultObjs := []*unstructured.Unstructured{} + generators := []*EventHandlerGenerator{} + manager, _, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - // 5a. Generate +1 add event for secrets. - - sc2 := getSecret("sc-2", "default") - sc2, err := dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - // 5b. Generate +1 update event for deployments. + ftc := daemonsetFTC + apiresource := ftc.GetSourceType() + gvr := schemautil.APIResourceToGVR(&apiresource) - dp1.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 2. Verify that the lister for daemonsets is not available at the start - // 5c. Generate +1 delete event for configmaps. + g.Consistently(func(g gomega.Gomega) { + lister, informerSynced, exists := manager.GetResourceLister(gvr) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(lister).To(gomega.BeNil()) + g.Expect(informerSynced).To(gomega.BeNil()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 3. Create the daemonset FTC. - // 5d. Santiy check: events for GVR without a corresponding FTC should not be received. + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - dm1 := getDaemonSet("dm-1", "default") - _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Create(ctx, dm1, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 4. Verify the the lister for daemonsets is eventually available - g.Eventually(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)+1)) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) + g.Eventually(func(g gomega.Gomega) { + lister, informerSynced, exists := manager.GetResourceLister(gvr) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(lister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) }) - // 6. Verify that unregisteredResourceEventHandler is not generated and receives 0 events. - - g.Consistently(func(g gomega.Gomega) { - g.Expect(unregisteredResourceEventHandler.getGenerateCount()).To(gomega.BeZero()) - g.Expect(unregisteredResourceEventHandler.getAddEventCount()).To(gomega.BeZero()) - g.Expect(unregisteredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(unregisteredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) -} - -// Verifies that event handlers from EventHandlerGenerators are registered for new FTCs created after the -// InformerManager is started. -func TestInformerManagerEventHandlerRegistrationForNewFTC(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // 1. 
Bootstrap an environment with no FTCs to begin with, but with 4 existing daemonsets. - - dm1 := getDaemonSet("dm-1", "default") - dm2 := getDaemonSet("dm-2", "default") - dm3 := getDaemonSet("dm-3", "default") - dm4 := getDaemonSet("dm-4", "default") - - defaultObjects := []*unstructured.Unstructured{dm1, dm2, dm3, dm4} - manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients([]*fedcorev1a1.FederatedTypeConfig{}, defaultObjects) + t.Run("event handlers for existing FTCs should be registered eventually", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // 2. Add EventHandlerGenerators to the InformerManager. registeredResourceEventHandler SHOULD be registered to ALL - // FTCs (based on its Predicate), unregisteredResourceEventHandler SHOULD NOT be registered for ANY FTCs (based on - // its Predicate). + // 1. Bootstrap environment - registeredResourceEventHandler := &countingResourceEventHandler{} - unregisteredResourceEventHandler := &countingResourceEventHandler{} + dp1 := getTestDeployment("dp-1", "default") + cm1 := getTestConfigMap("cm-1", "default") + sc1 := getTestSecret("sc-1", "default") - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, - Generator: registeredResourceEventHandler.generateEventHandler, - }) - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return false }, - Generator: unregisteredResourceEventHandler.generateEventHandler, - }) - - // 3. Start InformerManager. + alwaysRegistered := &countingResourceEventHandler{} + neverRegistered := &countingResourceEventHandler{} - ctx := context.Background() - manager.Start(ctx) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjs := []*unstructured.Unstructured{dp1, cm1, sc1} + generators := []*EventHandlerGenerator{ + { + Predicate: alwaysRegisterPredicate, + Generator: alwaysRegistered.GenerateEventHandler, + }, + { + Predicate: neverRegisterPredicate, + Generator: neverRegistered.GenerateEventHandler, + }, + } - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } + _, dynamicClient, _ := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - // 4. Create a new FTC for daemonsets. + // 2. Verify alwaysRegistered is eventually registered for all existing FTCs. - ftc := daemonsetFTC - _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectGenerateEvents(3) + alwaysRegistered.ExpectAddEvents(3) + alwaysRegistered.AssertEventually(g, time.Second*2) - // 5. Verify that the registeredResourceEventHandler is eventually registered for the new daemonset FTC and that the - // add events for the existing objects are ALL RECEIVED. + // 3. 
Verify newly generated events are received by alwaysRegistered - g.Eventually(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + _, err := dynamicClient.Resource(common.SecretGVR). + Namespace("default"). + Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectAddEvents(1) - // 6. Verify that additional events continue to be received by registeredResourceEventHandler. + dp1.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectUpdateEvents(1) - // 6a. Generate +2 update events for daemonsets. + err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectDeleteEvents(1) - dm1.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dm1, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - dm2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dm2, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.AssertEventually(g, time.Second*2) - // 6b. Generate +1 delete event for daemonsets. + // 4. Verify that events for non-existent FTCs are not received - err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + _, err = dynamicClient.Resource(common.DaemonSetGVR). + Namespace("default"). + Create(ctx, getTestDaemonSet("dm-1", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - // 6c. Santiy check: events for GVRs without a corresponding FTC should not be received. + alwaysRegistered.AssertConsistently(g, time.Second*2) - sc1 := getSecret("sc-1", "default") - _, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc1, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 5. Verify neverRegsitered receives no events - g.Eventually(func(g gomega.Gomega) { - g.Expect(registeredResourceEventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(registeredResourceEventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 2)) - g.Expect(registeredResourceEventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) + neverRegistered.AssertConsistently(g, time.Second*2) }) - // 7. Verify that unregisteredResourceEventHandler is not generated and receives 0 events. 
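The Expect*/Assert* helpers on countingResourceEventHandler used throughout these subtests live in testutils_test.go, which this diff excerpt does not show. A plausible sketch of their shape, inferred only from the call sites: Expect* methods raise running expectations, and Assert* methods compare the observed event counts against them with gomega's Eventually or Consistently.

package informermanager

import (
	"sync"
	"time"

	"github.com/onsi/gomega"
	"k8s.io/client-go/tools/cache"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
)

type countingResourceEventHandler struct {
	mu sync.Mutex

	generateCount, addCount, updateCount, deleteCount     int
	expectGenerate, expectAdd, expectUpdate, expectDelete int
}

func (h *countingResourceEventHandler) ExpectGenerateEvents(n int) { h.mu.Lock(); defer h.mu.Unlock(); h.expectGenerate += n }
func (h *countingResourceEventHandler) ExpectAddEvents(n int)      { h.mu.Lock(); defer h.mu.Unlock(); h.expectAdd += n }
func (h *countingResourceEventHandler) ExpectUpdateEvents(n int)   { h.mu.Lock(); defer h.mu.Unlock(); h.expectUpdate += n }
func (h *countingResourceEventHandler) ExpectDeleteEvents(n int)   { h.mu.Lock(); defer h.mu.Unlock(); h.expectDelete += n }

// check compares the observed counts against the current expectations.
func (h *countingResourceEventHandler) check(g gomega.Gomega) {
	h.mu.Lock()
	defer h.mu.Unlock()
	g.Expect(h.generateCount).To(gomega.Equal(h.expectGenerate))
	g.Expect(h.addCount).To(gomega.Equal(h.expectAdd))
	g.Expect(h.updateCount).To(gomega.Equal(h.expectUpdate))
	g.Expect(h.deleteCount).To(gomega.Equal(h.expectDelete))
}

func (h *countingResourceEventHandler) AssertEventually(g gomega.Gomega, timeout time.Duration) {
	g.Eventually(func(g gomega.Gomega) { h.check(g) }).WithTimeout(timeout).Should(gomega.Succeed())
}

func (h *countingResourceEventHandler) AssertConsistently(g gomega.Gomega, timeout time.Duration) {
	g.Consistently(func(g gomega.Gomega) { h.check(g) }).WithTimeout(timeout).Should(gomega.Succeed())
}

// GenerateEventHandler matches the EventHandlerGenerator.Generator signature.
func (h *countingResourceEventHandler) GenerateEventHandler(_ *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler {
	h.mu.Lock()
	h.generateCount++
	h.mu.Unlock()
	return cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { h.mu.Lock(); defer h.mu.Unlock(); h.addCount++ },
		UpdateFunc: func(oldObj, newObj interface{}) { h.mu.Lock(); defer h.mu.Unlock(); h.updateCount++ },
		DeleteFunc: func(obj interface{}) { h.mu.Lock(); defer h.mu.Unlock(); h.deleteCount++ },
	}
}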
- - g.Consistently(func(g gomega.Gomega) { - g.Expect(unregisteredResourceEventHandler.getGenerateCount()).To(gomega.BeZero()) - g.Expect(unregisteredResourceEventHandler.getAddEventCount()).To(gomega.BeZero()) - g.Expect(unregisteredResourceEventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(unregisteredResourceEventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) -} - -// Verifies that the EventHandlerGenerators receive the correct lastApplied and latest FTCs. -func TestInformerManagerEventHandlerGeneratorsReceiveCorrectFTCs(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // 1. Bootstrap an environemnt with a single Deployment FTC. - - generation := 1 + t.Run("event handlers for new FTCs should be registered eventually", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ftc := deploymentFTC.DeepCopy() - ftc.SetAnnotations(map[string]string{"generation": strconv.Itoa(generation)}) - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + // 1. Bootstrap environment - manager, _, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, []*unstructured.Unstructured{}) + dm1 := getTestDaemonSet("dm-1", "default") + dm2 := getTestDaemonSet("dm-2", "default") + dm3 := getTestDaemonSet("dm-3", "default") + dm4 := getTestDaemonSet("dm-4", "default") - // 2. Add EventHandlerGenerators to the InformerManager, the EventHandlerGenerator verifies that the "generation" - // annotation matches the generation variable + alwaysRegistered := &countingResourceEventHandler{} + neverRegistered := &countingResourceEventHandler{} - // lock is used to ensure that the FTC events are not squashed by the InformerManager and that each event is - // processed. - lock := make(chan struct{}) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} + defaultObjs := []*unstructured.Unstructured{dm1, dm2, dm3, dm4} + generators := []*EventHandlerGenerator{ + { + Predicate: alwaysRegisterPredicate, + Generator: alwaysRegistered.GenerateEventHandler, + }, + { + Predicate: neverRegisterPredicate, + Generator: neverRegistered.GenerateEventHandler, + }, + } - eventHandler := &countingResourceEventHandler{} - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { - switch { - case generation == 1: - // if newly created, expect lastApplied to be nil - g.Expect(lastApplied).To(gomega.BeNil()) - default: - g.Expect(strconv.Atoi(lastApplied.GetAnnotations()["generation"])).To(gomega.BeNumerically("==", generation-1)) - g.Expect(strconv.Atoi(latest.GetAnnotations()["generation"])).To(gomega.BeNumerically("==", generation)) - } - - <-lock - - return true - }, - Generator: eventHandler.generateEventHandler, - }) + _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - // 3. Start InformerManager. + // 2. Create new FTC for daemonset - ctx := context.Background() - manager.Start(ctx) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, daemonsetFTC, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } + // 3. 
Verify that alwaysRegistered is eventually registered for the new Daemonset FTC - lock <- struct{}{} + alwaysRegistered.ExpectGenerateEvents(1) + alwaysRegistered.ExpectAddEvents(4) + alwaysRegistered.AssertEventually(g, time.Second*2) - // 4. Trigger FTC updates + // 4. Verify that newly generated events are also received by alwaysRegistered - for i := 0; i < 5; i++ { - generation++ - ftc.SetAnnotations(map[string]string{"generation": strconv.Itoa(generation)}) - _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + dm1.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectUpdateEvents(1) - lock <- struct{}{} - } -} - -// Verifies that the event handler from EventHandlerGenerator is generated and registered on a corresponding FTC update -// where Predicate returns true and Generator returns an event handler. -func TestInformerManagerEventHandlerRegisteredOnFTCUpdate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // 1. Bootstrap an environemnt with a single Deployment FTC and a single deployment. + err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectDeleteEvents(1) - dp1 := getDeployment("dp-1", "default") + alwaysRegistered.AssertEventually(g, time.Second*2) - ftc := deploymentFTC.DeepCopy() - ftc.SetAnnotations(map[string]string{"predicate": "false"}) + // 4. Verify that events for non-existent FTCs are not received by alwaysRegistered - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} - defaultObjects := []*unstructured.Unstructured{dp1} - manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) + _, err = dynamicClient.Resource(common.SecretGVR). + Namespace("default"). + Create(ctx, getTestSecret("sc-1", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.AssertConsistently(g, time.Second*2) - // 2. Add EventHandlerGenerators to the InformerManager, eventHandler SHOULD be generated when the "predicate" - // annotation of the FTC is "true". + // 5. Verify that unregisteredResourceEventHandler is not registered - eventHandler := &countingResourceEventHandler{} - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { - annotations := latest.GetAnnotations() - return annotations["predicate"] == "true" - }, - Generator: eventHandler.generateEventHandler, + neverRegistered.AssertConsistently(g, time.Second*2) }) - // 3. Start InformerManager. - - ctx := context.Background() - manager.Start(ctx) - - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } - - // 4. 
Sanity check: eventHandler should not be registered initially - - g.Consistently(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeZero()) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + t.Run("EventHandlerGenerators should receive correct lastApplied and latest FTCs", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // 5. Trigger a registration by updating the "predicate" annotation to "true" + // 1. Bootstrap environment - ftc.SetAnnotations(map[string]string{"predicate": "true"}) - ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + var generation int64 = 1 - // 6. Generate events for deployments + // assertionCh is used to achieve 2 things: + // 1. It is used to pass assertions to the main goroutine. + // 2. It is used as an implicit lock to ensure FTC events are not squashed by the InformerManager. + assertionCh := make(chan func()) - // +1 add event + ftc := deploymentFTC.DeepCopy() + ftc.SetGeneration(generation) - dp2 := getDeployment("dp-2", "default") - dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generator := &EventHandlerGenerator{ + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { + if generation == 1 { + assertionCh <- func() { + g.Expect(lastApplied).To(gomega.BeNil()) + } + } else { + assertionCh <- func() { + g.Expect(lastApplied.GetGeneration()).To(gomega.BeNumerically("==", generation-1)) + g.Expect(latest.GetGeneration()).To(gomega.BeNumerically("==", generation)) + } + } - // +1 update event - - dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - // +1 delete event - - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - // 7. Verify that events are eventually received by eventHandler - - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 2)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) -} - -// Verifies that the event handler from EventHandlerGenerator is unregistered on a corresponding FTC update -// where Predicate returns true and Generator returns a nil event handler. -func TestInformerManagerEventHandlerUnregisteredOnFTCUpdate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // 1. Bootstrap an environemnt with a single Deployment FTC and a single deployment. 
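The assertionCh idiom in the lastApplied/latest subtest above deserves a note: the Predicate runs on the InformerManager's worker goroutine, where a failed gomega assertion cannot stop the test, so the Predicate wraps its checks in a closure and hands them to the test goroutine for execution. Because the channel is unbuffered, the worker also blocks until the previous closure has been consumed, preventing consecutive FTC updates from being coalesced before each one is observed. A freestanding illustration of the idiom (not part of the patch):

package informermanager

import "fmt"

func Example_assertionChannel() {
	assertionCh := make(chan func())

	go func() {
		observed := "lastApplied == nil" // stand-in for state captured inside a Predicate call
		assertionCh <- func() { fmt.Println("assert:", observed) }
	}()

	// The receiving goroutine (the test) executes the assertion on its own stack,
	// where a failure can be reported through *testing.T.
	fn := <-assertionCh
	fn()
	// Output: assert: lastApplied == nil
}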
- - dp1 := getDeployment("dp-1", "default") - - ftc := deploymentFTC.DeepCopy() - ftc.SetAnnotations(map[string]string{"generator": "true"}) - - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} - defaultObjects := []*unstructured.Unstructured{dp1} - manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) - - // 2. Add EventHandlerGenerators to the InformerManager, Predicate always returns true and Generator returns - // eventHandler if the "generator" annotation == "true". Otherwise Generator returns nil. - - eventHandler := &countingResourceEventHandler{} - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { - return true - }, - Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { - annotations := ftc.GetAnnotations() - if annotations["generator"] == "true" { - return eventHandler.generateEventHandler(ftc) - } - return nil - }, + return true + }, + Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { return nil }, + } + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := []*unstructured.Unstructured{} + generators := []*EventHandlerGenerator{generator} + _, _, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + + fn := <-assertionCh + fn() + + // 3. Generate FTC update events + for i := 0; i < 5; i++ { + generation++ + ftc.SetGeneration(generation) + + var err error + ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + fn = <-assertionCh + fn() + } }) - // 3. Start InformerManager. - - ctx := context.Background() - manager.Start(ctx) + t.Run("event handler should be registered on FTC update", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } + // 1. Bootstrap environment - // 4. Sanity check: eventHandler should be registered initially + dp1 := getTestDeployment("dp-1", "default") - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + ftc := deploymentFTC.DeepCopy() + ftc.SetAnnotations(map[string]string{"predicate": "false", "generator": "true"}) - // 5. Trigger an unregistration by updating the "predicate" annotation to "false" + handler := &countingResourceEventHandler{} + generator := newAnnotationBasedGenerator(handler) - ftc.SetAnnotations(map[string]string{"generator": "false"}) - ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := []*unstructured.Unstructured{dp1} + generators := []*EventHandlerGenerator{generator} - // 6. Sleep for a second to allow the InformerManager to process the FTC update. 
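newAnnotationBasedGenerator is another testutils_test.go helper that this excerpt omits. From its call sites in the two annotation-driven subtests, a plausible reconstruction: the Predicate fires whenever the FTC's "predicate" annotation is "true", and the Generator returns the counting handler only while the "generator" annotation is "true", returning nil otherwise so that any existing handler is unregistered.

package informermanager

import (
	"k8s.io/client-go/tools/cache"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
)

func newAnnotationBasedGenerator(handler *countingResourceEventHandler) *EventHandlerGenerator {
	return &EventHandlerGenerator{
		Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool {
			// Regenerate only when the latest FTC opts in via annotation.
			return latest.GetAnnotations()["predicate"] == "true"
		},
		Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler {
			if ftc.GetAnnotations()["generator"] == "true" {
				return handler.GenerateEventHandler(ftc)
			}
			// A nil handler unregisters any previously generated handler.
			return nil
		},
	}
}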
- <-time.After(time.Second) + _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - // 7. Generate events for deployments + // 2. Verify that handler is not registered initially. - // +1 add event + handler.AssertConsistently(g, time.Second*2) - dp2 := getDeployment("dp-2", "default") - dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 3. Update FTC to trigger registration - // +1 update event + ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) + ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 4. Verify that handler is registered and additional events are received - // +1 delete event + handler.ExpectGenerateEvents(1) + handler.ExpectAddEvents(1) - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.AssertEventually(g, time.Second*2) - // 8. Verify that events are no longer received by eventHandler. + dp2, err := dynamicClient.Resource(common.DeploymentGVR). + Namespace("default"). + Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectAddEvents(1) - g.Consistently(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) -} + dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) + dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectUpdateEvents(1) -// Verifies that the event handler from an EventHandlerGenerator is regenerated and registered on a corresponding FTC -// update where Predicate returns true and Generator returns an event handler. -func TestInformerManagerEventHandlerReregisteredFTCUpdate(t *testing.T) { - g := gomega.NewGomegaWithT(t) + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectDeleteEvents(1) - // 1. Bootstrap an environemnt with a single Deployment FTC and a single deployment. + handler.AssertEventually(g, time.Second*2) + }) - ftc := deploymentFTC.DeepCopy() - dp1 := getDeployment("dp-1", "default") + t.Run("event handler should be unregistered on FTC update", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} - defaultObjects := []*unstructured.Unstructured{dp1} - manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) + // 1. Bootstrap environment - // 2. 
Add EventHandlerGenerators to the InformerManager, eventHandler SHOULD always be regenerated and registered. + dp1 := getTestDeployment("dp-1", "default") - eventHandler := &countingResourceEventHandler{} - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { - return true - }, - Generator: eventHandler.generateEventHandler, - }) + ftc := deploymentFTC.DeepCopy() + ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) - // 3. Start InformerManager. + handler := &countingResourceEventHandler{} + generator := newAnnotationBasedGenerator(handler) - ctx := context.Background() - manager.Start(ctx) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := []*unstructured.Unstructured{dp1} + generators := []*EventHandlerGenerator{generator} - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } + _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - // 4. Verify that eventHandler is generated and registered initially. + // 2. Verify that handler is registered initially. - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + handler.ExpectGenerateEvents(1) + handler.ExpectAddEvents(1) + handler.AssertEventually(g, time.Second*2) - // 5. Trigger FTC updates + // 3. Update FTC to trigger unregistration - for i := 0; i < 5; i++ { - ftc.SetAnnotations(map[string]string{"trigger": strconv.Itoa(rand.Intn(1000))}) - _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "false"}) + ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 5a. Generate deployment events as well + <-time.After(time.Second) - // +1 Update event + // 4. Verify that handler is unregistered and new events are no longer received by handler. - dp1.SetAnnotations(map[string]string{"trigger": strconv.Itoa(rand.Intn(1000))}) - dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + dp2, err := dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 5b. 
Verify eventHandler is regenerated and registered - - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1+i)) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", i)) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }) - } -} - -// Verifies that the event handler from an EventHandlerGenerator is unchanged after a corresponding FTC update where -// Predicate returns false and an event handler did not exist previously. -func TestInformerManagerEventHandlerUnchangedFTCUpdate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // 1. Bootstrap an environemnt with a single Deployment FTC and a single deployment. + dp2.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - ftc := deploymentFTC.DeepCopy() - dp1 := getDeployment("dp-1", "default") + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} - defaultObjects := []*unstructured.Unstructured{dp1} - manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) + handler.AssertConsistently(g, time.Second*2) + }) - // 2. Add EventHandlerGenerators to the InformerManager, eventHandler SHOULD only be created once per new FTC and - // never be regenerated. + t.Run("event handler should be re-registered on FTC update", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - eventHandler := &countingResourceEventHandler{} - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied *fedcorev1a1.FederatedTypeConfig, latest *fedcorev1a1.FederatedTypeConfig) bool { - if lastApplied == nil { - return true - } - return false - }, - Generator: eventHandler.generateEventHandler, - }) + // 1. Bootstrap environment - // 3. Start InformerManager. + dp1 := getTestDeployment("dp-1", "default") + ftc := deploymentFTC.DeepCopy() - ctx := context.Background() - manager.Start(ctx) + handler := &countingResourceEventHandler{} + generator := &EventHandlerGenerator{ + Predicate: alwaysRegisterPredicate, + Generator: handler.GenerateEventHandler, + } - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := []*unstructured.Unstructured{dp1} + generators := []*EventHandlerGenerator{generator} - // 4. Verify that eventHandler is generated and registered initially. + _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + // 2. 
Verify that handler is registered initially + + handler.ExpectGenerateEvents(1) + handler.ExpectAddEvents(1) + handler.AssertEventually(g, time.Second*2) - // 5. Trigger FTC updates + // 3. Trigger FTC updates and verify re-registration - for i := 0; i < 5; i++ { - ftc.SetAnnotations(map[string]string{"trigger": strconv.Itoa(rand.Intn(1000))}) + ftc.SetAnnotations(map[string]string{"test": "test"}) _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 5a. Generate deployment events as well - - // +1 Update event + handler.ExpectGenerateEvents(1) + handler.ExpectAddEvents(1) + handler.AssertEventually(g, time.Second*2) - dp1.SetAnnotations(map[string]string{"trigger": strconv.Itoa(rand.Intn(1000))}) - dp1, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + dp1.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 5b. Verify eventHandler is not regenerated but continues to be registered. - - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler.getGenerateCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getAddEventCount()).To(gomega.BeNumerically("==", 1)) - g.Expect(eventHandler.getUpdateEventCount()).To(gomega.BeNumerically("==", i)) - g.Expect(eventHandler.getDeleteEventCount()).To(gomega.BeZero()) - }) - } -} - -// Verifies that event handlers from EventHandlerGenerators are unregistered when a FTC is deleted. -func TestInformerManagerEventHandlerRegistrationOnFTCDelete(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // 1. Bootstrap an environment with FTCs for deplyoments, configmaps and secrets. Also create an existing - // deployment, configmap and secret. - - dp1 := getDeployment("dp-1", "default") - cm1 := getConfigMap("cm-1", "default") - sc1 := getSecret("sc-1", "default") - - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} - defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1} - manager, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) - - // 2. Add EventHandlerGenerators to the InformerManager. eventHandler1 and eventHandler2 SHOULD be registered to ALL - // FTCs (based on its Predicate). - - eventHandler1 := &countingResourceEventHandler{} - eventHandler2 := &countingResourceEventHandler{} - - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, - Generator: eventHandler1.generateEventHandler, - }) - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, - Generator: eventHandler2.generateEventHandler, + handler.ExpectUpdateEvents(1) + handler.AssertEventually(g, time.Second*2) }) - // 3. Start the InformerManager. + t.Run("event handler should be unchanged on FTC update", func(t * testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() - manager.Start(ctx) + // 1. 
Bootstrap environment - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } + dp1 := getTestDeployment("dp-1", "default") + ftc := deploymentFTC.DeepCopy() - // 4. Santiy check: verify that both eventHandler1 and eventHandler2 is registered and received events for the - // existing objects. + handler := &countingResourceEventHandler{} + generator := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler.GenerateEventHandler, + } - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeZero()) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := []*unstructured.Unstructured{dp1} + generators := []*EventHandlerGenerator{generator} - g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - // 5. Delete the deployment FTC. + // 2. Verify that handler is registered initially + + handler.ExpectGenerateEvents(1) + handler.ExpectAddEvents(1) + handler.AssertEventually(g, time.Second*2) - err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Delete(ctx, deploymentFTC.Name, metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 3. Trigger FTC updates and verify no re-registration - // 6. Sleep for a second to allow the InformerManager to process the FTC deletion. + ftc.SetAnnotations(map[string]string{"test": "test"}) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - <-time.After(time.Second) + handler.AssertConsistently(g, time.Second*2) - // 7. Verify that events are no longer received by eventHandler1 and eventHandler2. + // 4. Verify events are still received by handler - // 7a. Generate +1 add event for deployments. + dp1.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - dp2 := getDeployment("dp-2", "default") - dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectUpdateEvents(1) + handler.AssertEventually(g, time.Second*2) + }) - // 7b. Generate +1 update event for deployments. 
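// A minimal sketch (not part of the patch itself) of the contract the subtests
// above exercise. Assuming the client-go version used here supports handler
// removal, reconciling one EventHandlerGenerator against an FTC event amounts
// to the following; the function name is illustrative only.
func reconcileGeneratorSketch(
	generator *EventHandlerGenerator,
	lastApplied, latest *fedcorev1a1.FederatedTypeConfig,
	informer cache.SharedIndexInformer,
	existing cache.ResourceEventHandlerRegistration,
) (cache.ResourceEventHandlerRegistration, error) {
	if !generator.Predicate(lastApplied, latest) {
		// "unchanged on FTC update": the current registration is kept as-is.
		return existing, nil
	}
	if existing != nil {
		// A true Predicate always drops the stale registration first.
		if err := informer.RemoveEventHandler(existing); err != nil {
			return nil, err
		}
	}
	if handler := generator.Generator(latest); handler != nil {
		// "re-registered on FTC update": a fresh handler is attached.
		return informer.AddEventHandler(handler)
	}
	// A Generator may decline registration by returning nil.
	return nil, nil
}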
+ t.Run("event handler should be unregisterd on FTC delete", func(t *testing.T){ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 1. Bootstrap environment - // 7c. Generate +1 delete event for deployments. + dp1 := getTestDeployment("dp-1", "default") + cm1 := getTestConfigMap("cm-1", "default") + sc1 := getTestSecret("sc-1", "default") - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp1.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + handler1 := &countingResourceEventHandler{} + handler2 := &countingResourceEventHandler{} + generator1 := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler1.GenerateEventHandler, + } + generator2 := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler2.GenerateEventHandler, + } - g.Consistently(func(g gomega.Gomega) { - g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeZero()) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjs := []*unstructured.Unstructured{dp1, cm1, sc1} + generators := []*EventHandlerGenerator{generator1, generator2} - g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - // 8. Sanity check: verify that events continue to be received for the other remaining FTCs' source types + // 2. Verify that handler1 and handler2 is registered initially for all FTCs + + handler1.ExpectGenerateEvents(3) + handler1.ExpectAddEvents(3) + handler1.AssertEventually(g, time.Second*2) - // 8a. Generate +1 add event for secrets. + handler2.ExpectGenerateEvents(3) + handler2.ExpectAddEvents(3) + handler2.AssertEventually(g, time.Second*2) - sc2 := getSecret("sc-2", "default") - sc2, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // 3. Delete the deployment FTC - // 8b. Generate +1 update event for configmaps. 
+ err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Delete(ctx, deploymentFTC.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + <-time.After(time.Second) - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)+1)) - g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) + // 4. Verify that handler1 and handler2 is unregistered for deployments and no additional events are received - g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects)+1)) - g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeNumerically("==", 1)) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) -} + dp2, err := dynamicClient.Resource(common.DeploymentGVR). Namespace("default").Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) -// Verifies that all event handlers from EventHandlerGenerators no longer receive any events after the InformerManager -// is shutdown (or when the context passed to the Start method expires). -func TestInformerManagerEventHandlerRegistrationOnShutdown(t *testing.T) { - g := gomega.NewGomegaWithT(t) + dp2.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - // 1. Bootstrap an environment with FTCs for deplyoments, configmaps and secrets. Also create an existing - // deployment, configmap and secret. + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - dp1 := getDeployment("dp-1", "default") - cm1 := getConfigMap("cm-1", "default") - sc1 := getSecret("sc-1", "default") + handler1.AssertConsistently(g, time.Second*2) + handler2.AssertConsistently(g, time.Second*2) - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} - defaultObjects := []*unstructured.Unstructured{dp1, cm1, sc1} - manager, dynamicClient, _ := boostrapInformerManagerWithFakeClients(defaultFTCs, defaultObjects) + // 5. Verify that handler1 and handler2 is not unregistered for other FTCs. - // 2. Add EventHandlerGenerators to the InformerManager. eventHandler1 and eventHandler2 SHOULD be registered to ALL - // FTCs (based on its Predicate). 
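// The two assertion helpers used above differ only in the Gomega primitive
// they wrap: AssertEventually passes once the counters reach the expected
// values within the timeout, whereas AssertConsistently requires them to stay
// at the expected values for the entire window, i.e. no stray events arrive
// from informers that should have been stopped. A minimal sketch with
// illustrative names:
func assertNoStrayEventsSketch(g gomega.Gomega, observed func() int, expected func() int, window time.Duration) {
	g.Consistently(func(g gomega.Gomega) {
		g.Expect(observed()).To(gomega.BeNumerically("==", expected()))
	}).WithTimeout(window).Should(gomega.Succeed())
}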
+ _, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler1.ExpectAddEvents(1) + handler2.ExpectAddEvents(1) - eventHandler1 := &countingResourceEventHandler{} - eventHandler2 := &countingResourceEventHandler{} + cm1.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Update(ctx, cm1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler1.ExpectUpdateEvents(1) + handler2.ExpectUpdateEvents(1) - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, - Generator: eventHandler1.generateEventHandler, - }) - manager.AddEventHandlerGenerator(&EventHandlerGenerator{ - Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { return true }, - Generator: eventHandler2.generateEventHandler, + handler1.AssertEventually(g, time.Second*2) + handler2.AssertEventually(g, time.Second*2) }) - // 3. Start the InformerManager. - - ctx, managerCancel := context.WithCancel(context.Background()) - manager.Start(ctx) - - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } - - // 4. Santiy check: verify that both eventHandler1 and eventHandler2 is registered and received events for the - // existing objects. + t.Run("event handlers should be unregistered on manager shutdown", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - g.Eventually(func(g gomega.Gomega) { - g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeZero()) + // 1. Bootstrap environment - g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + dp1 := getTestDeployment("dp-1", "default") + cm1 := getTestConfigMap("cm-1", "default") + sc1 := getTestSecret("sc-1", "default") - // 5. Stop the InformerManager + handler1 := &countingResourceEventHandler{} + handler2 := &countingResourceEventHandler{} + generator1 := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler1.GenerateEventHandler, + } + generator2 := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler2.GenerateEventHandler, + } - managerCancel() + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjs := []*unstructured.Unstructured{dp1, cm1, sc1} + generators := []*EventHandlerGenerator{generator1, generator2} - // 6. 
Sleep for a second to allow the InformerManager to process the shutdown + managerCtx, managerCancel := context.WithCancel(ctx) + _, dynamicClient, _ := boostrapInformerManagerWithFakeClients(g, managerCtx, defaultFTCs, defaultObjs, generators) - <-time.After(time.Second) + // 2. Verify that handler1 and handler2 is registered initially for all FTCs + + handler1.ExpectGenerateEvents(3) + handler1.ExpectAddEvents(3) + handler1.AssertEventually(g, time.Second*2) - // 7. Verify that events are not received for ANY FTCs by both eventHandler1 and eventHandler2. + handler2.ExpectGenerateEvents(3) + handler2.ExpectAddEvents(3) + handler2.AssertEventually(g, time.Second*2) - // 7a. Generate +1 add event for deployments. + // 3. Shutdown the manager - dp2 := getDeployment("dp-2", "default") - dp2, err := dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, dp2, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + managerCancel() + <-time.After(time.Second) - // 7b. Generate +1 add event for configmaps. + // 4. Verify that handler1 and handler2 is unregistered for all FTCs and no more events are received - cm2 := getConfigMap("cm-2", "default") - cm2, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Create(ctx, cm2, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - // 7b. Generate +1 add event for secrets. + _, err := dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - sc2 := getSecret("sc-2", "default") - sc2, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, sc2, metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - g.Consistently(func(g gomega.Gomega) { - g.Expect(eventHandler1.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler1.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(eventHandler1.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler1.getDeleteEventCount()).To(gomega.BeZero()) + sc1.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Update(ctx, sc1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - g.Expect(eventHandler2.getGenerateCount()).To(gomega.BeNumerically("==", len(defaultFTCs))) - g.Expect(eventHandler2.getAddEventCount()).To(gomega.BeNumerically("==", len(defaultObjects))) - g.Expect(eventHandler2.getUpdateEventCount()).To(gomega.BeZero()) - g.Expect(eventHandler2.getDeleteEventCount()).To(gomega.BeZero()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + handler1.AssertConsistently(g, time.Second*2) + handler2.AssertConsistently(g, time.Second*2) + }) } func boostrapInformerManagerWithFakeClients( + g *gomega.WithT, + ctx context.Context, ftcs []*fedcorev1a1.FederatedTypeConfig, objects []*unstructured.Unstructured, + eventHandlerGenerators []*EventHandlerGenerator, ) (InformerManager, dynamicclient.Interface, fedclient.Interface) { scheme := runtime.NewScheme() @@ -950,7 +646,22 @@ func boostrapInformerManagerWithFakeClients( factory := fedinformers.NewSharedInformerFactory(fedClient, 0) informerManager := NewInformerManager(dynamicClient, 
factory.Core().V1alpha1().FederatedTypeConfigs()) - factory.Start(context.TODO().Done()) + for _, generator := range eventHandlerGenerators { + informerManager.AddEventHandlerGenerator(generator) + } + + factory.Start(ctx.Done()) + informerManager.Start(ctx) + + stopCh := make(chan struct{}) + go func() { + <-time.After(time.Second * 3) + close(stopCh) + }() + + if !cache.WaitForCacheSync(stopCh, informerManager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } return informerManager, dynamicClient, fedClient } diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index d67e589a..dc4ce9c5 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -100,7 +100,7 @@ type ClusterClientGetter struct { // ConnectionHash should return a string that uniquely identifies the combination of parameters used to generate the // cluster client. A change in the connection hash indicates a need to create a new client for a given member // cluster. - ConnectionHash func(cluster *fedcorev1a1.FederatedCluster) string + ConnectionHash func(cluster *fedcorev1a1.FederatedCluster) ([]byte, error) // ClientGetter returns a dynamic client for the given member cluster. ClientGetter func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) } diff --git a/pkg/util/informermanager/testutils.go b/pkg/util/informermanager/testutils_test.go similarity index 52% rename from pkg/util/informermanager/testutils.go rename to pkg/util/informermanager/testutils_test.go index 925fb161..238bcb14 100644 --- a/pkg/util/informermanager/testutils.go +++ b/pkg/util/informermanager/testutils_test.go @@ -2,16 +2,19 @@ package informermanager import ( "sync" + "time" - corev1 "k8s.io/api/core/v1" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/tools/cache" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "github.com/onsi/gomega" ) var ( @@ -58,7 +61,6 @@ var ( }, } secretFTC = &fedcorev1a1.FederatedTypeConfig{ - ObjectMeta: metav1.ObjectMeta{ Name: "secrets", }, @@ -74,68 +76,7 @@ var ( } ) -type countingResourceEventHandler struct { - lock sync.RWMutex - - generateCount int - - addEventCount int - updateEventCount int - deleteEventCount int -} - -func (h *countingResourceEventHandler) getAddEventCount() int { - h.lock.RLock() - defer h.lock.RUnlock() - return h.addEventCount -} - -func (h *countingResourceEventHandler) getUpdateEventCount() int { - h.lock.RLock() - defer h.lock.RUnlock() - return h.updateEventCount -} - -func (h *countingResourceEventHandler) getDeleteEventCount() int { - h.lock.RLock() - defer h.lock.RUnlock() - return h.deleteEventCount -} - -func (h *countingResourceEventHandler) getGenerateCount() int { - h.lock.RLock() - defer h.lock.RUnlock() - return h.generateCount -} - -func (h *countingResourceEventHandler) generateEventHandler(_ *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { - h.lock.Lock() - defer h.lock.Unlock() - h.generateCount++ - return h -} - -func (h *countingResourceEventHandler) OnAdd(_ interface{}) { - h.lock.Lock() - defer h.lock.Unlock() - h.addEventCount++ -} - -func (h *countingResourceEventHandler) OnDelete(_ interface{}) { - h.lock.Lock() - defer h.lock.Unlock() - 
h.deleteEventCount++ -} - -func (h *countingResourceEventHandler) OnUpdate(_ interface{}, _ interface{}) { - h.lock.Lock() - defer h.lock.Unlock() - h.updateEventCount++ -} - -var _ cache.ResourceEventHandler = &countingResourceEventHandler{} - -func getDeployment(name, namespace string) *unstructured.Unstructured { +func getTestDeployment(name, namespace string) *unstructured.Unstructured { dp := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: "Deployment", @@ -155,7 +96,7 @@ func getDeployment(name, namespace string) *unstructured.Unstructured { return &unstructured.Unstructured{Object: dpMap} } -func getConfigMap(name, namespace string) *unstructured.Unstructured { +func getTestConfigMap(name, namespace string) *unstructured.Unstructured { cm := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ Kind: "ConfigMap", @@ -175,7 +116,7 @@ func getConfigMap(name, namespace string) *unstructured.Unstructured { return &unstructured.Unstructured{Object: cmMap} } -func getSecret(name, namespace string) *unstructured.Unstructured { +func getTestSecret(name, namespace string) *unstructured.Unstructured { secret := &corev1.Secret{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", @@ -195,7 +136,7 @@ func getSecret(name, namespace string) *unstructured.Unstructured { return &unstructured.Unstructured{Object: secretMap} } -func getDaemonSet(name, namespace string) *unstructured.Unstructured { +func getTestDaemonSet(name, namespace string) *unstructured.Unstructured { dm := &appsv1.DaemonSet{ TypeMeta: metav1.TypeMeta{ Kind: "DaemonSet", @@ -214,3 +155,140 @@ func getDaemonSet(name, namespace string) *unstructured.Unstructured { return &unstructured.Unstructured{Object: dmMap} } + +func getTestCluster(name string) *fedcorev1a1.FederatedCluster { + return &fedcorev1a1.FederatedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: fedcorev1a1.FederatedClusterSpec{ + APIEndpoint: rand.String(24), + Insecure: false, + UseServiceAccountToken: true, + SecretRef: fedcorev1a1.LocalSecretReference{ + Name: name, + }, + Taints: []corev1.Taint{}, + }, + Status: fedcorev1a1.FederatedClusterStatus{ + Conditions: []fedcorev1a1.ClusterCondition{ + { + Type: fedcorev1a1.ClusterJoined, + Status: corev1.ConditionTrue, + LastProbeTime: metav1.Now(), + LastTransitionTime: metav1.Now(), + }, + }, + JoinPerformed: true, + }, + } +} + +type countingResourceEventHandler struct { + lock sync.RWMutex + + generateCount int + addEventCount int + updateEventCount int + deleteEventCount int + + expectedGenerateCount int + expectedAddEventCount int + expectedUpdateEventCount int + expectedDeleteEventCount int +} + +func (h *countingResourceEventHandler) ExpectGenerateEvents(i int) { + h.lock.Lock() + defer h.lock.Unlock() + h.expectedGenerateCount += i +} + +func (h *countingResourceEventHandler) ExpectAddEvents(i int) { + h.lock.Lock() + defer h.lock.Unlock() + h.expectedAddEventCount += i +} + +func (h *countingResourceEventHandler) ExpectUpdateEvents(i int) { + h.lock.Lock() + defer h.lock.Unlock() + h.expectedUpdateEventCount += i +} + +func (h *countingResourceEventHandler) ExpectDeleteEvents(i int) { + h.lock.Lock() + defer h.lock.Unlock() + h.expectedDeleteEventCount += i +} + +func (h *countingResourceEventHandler) AssertEventually(g gomega.Gomega, timeout time.Duration) { + g.Eventually(func(g gomega.Gomega) { + g.Expect(h.generateCount).To(gomega.BeNumerically("==", h.expectedGenerateCount)) + g.Expect(h.addEventCount).To(gomega.BeNumerically("==", h.expectedAddEventCount)) + 
g.Expect(h.updateEventCount).To(gomega.BeNumerically("==", h.expectedUpdateEventCount)) + g.Expect(h.deleteEventCount).To(gomega.BeNumerically("==", h.expectedDeleteEventCount)) + }).WithTimeout(timeout).Should(gomega.Succeed()) +} + +func (h *countingResourceEventHandler) AssertConsistently(g gomega.Gomega, timeout time.Duration) { + g.Consistently(func(g gomega.Gomega) { + g.Expect(h.generateCount).To(gomega.BeNumerically("==", h.expectedGenerateCount)) + g.Expect(h.addEventCount).To(gomega.BeNumerically("==", h.expectedAddEventCount)) + g.Expect(h.updateEventCount).To(gomega.BeNumerically("==", h.expectedUpdateEventCount)) + g.Expect(h.deleteEventCount).To(gomega.BeNumerically("==", h.expectedDeleteEventCount)) + }).WithTimeout(timeout).Should(gomega.Succeed()) +} + +func (h *countingResourceEventHandler) GenerateEventHandler(_ *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + h.lock.Lock() + defer h.lock.Unlock() + h.generateCount++ + return h +} + +func (h *countingResourceEventHandler) OnAdd(_ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.addEventCount++ +} + +func (h *countingResourceEventHandler) OnDelete(_ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.deleteEventCount++ +} + +func (h *countingResourceEventHandler) OnUpdate(_ interface{}, _ interface{}) { + h.lock.Lock() + defer h.lock.Unlock() + h.updateEventCount++ +} + +var _ cache.ResourceEventHandler = &countingResourceEventHandler{} + +func alwaysRegisterPredicate(_, _ *fedcorev1a1.FederatedTypeConfig) bool { + return true +} + +func neverRegisterPredicate(_, _ *fedcorev1a1.FederatedTypeConfig) bool { + return false +} + +func registerOncePredicate(old, _ *fedcorev1a1.FederatedTypeConfig) bool { + return old == nil +} + +func newAnnotationBasedGenerator(handler *countingResourceEventHandler) *EventHandlerGenerator { + return &EventHandlerGenerator{ + Predicate: func(_, latest *fedcorev1a1.FederatedTypeConfig) bool { + return latest.GetAnnotations()["predicate"] == "true" + }, + Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + if ftc.GetAnnotations()["generator"] == "true" { + return handler.GenerateEventHandler(ftc) + } + return nil + }, + } +} From d505f2c8c2475a1a9f4b8d89e7df2fcde89a09b0 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 11 Jul 2023 16:14:55 +0800 Subject: [PATCH 017/173] fix logger keys, use ratelimiting and other fixes --- .../federatedinformermanager.go | 28 ++++++++++--------- pkg/util/informermanager/informermanager.go | 28 ++++++++++--------- pkg/util/informermanager/testutils_test.go | 4 +-- 3 files changed, 32 insertions(+), 28 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 51e6dd72..5dd30974 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -38,7 +38,7 @@ type federatedInformerManager struct { informerManagers map[string]InformerManager informerManagersCancelFuncs map[string]context.CancelFunc - queue workqueue.Interface + queue workqueue.RateLimitingInterface logger klog.Logger } @@ -94,22 +94,24 @@ func (m *federatedInformerManager) worker() { } defer m.queue.Done(key) + logger := m.logger.WithValues("key", key) + _, name, err := cache.SplitMetaNamespaceKey(key.(string)) if err != nil { - m.logger.Error(err, "Failed to process FederatedCluster") + logger.Error(err, "Failed to process FederatedCluster") return } cluster, err := 
m.clusterInformer.Lister().Get(name) if err != nil && !apierrors.IsNotFound(err) { - m.logger.Error(err, "Failed to process FederatedCluster, will retry") - m.queue.Add(key) + logger.Error(err, "Failed to get FederatedCluster from lister, will retry") + m.queue.AddRateLimited(key) return } if apierrors.IsNotFound(err) || !util.IsClusterJoined(&cluster.Status) { if err := m.processClusterDeletion(name); err != nil { - m.logger.Error(err, "Failed to process FederatedCluster, will retry") - m.queue.Add(key) + logger.Error(err, "Failed to process FederatedCluster, will retry") + m.queue.AddRateLimited(key) return } return @@ -118,13 +120,13 @@ func (m *federatedInformerManager) worker() { err, needReenqueue := m.processCluster(cluster) if err != nil { if needReenqueue { - m.logger.Error(err, "Failed to process FederatedCluster, will retry") + logger.Error(err, "Failed to process FederatedCluster, will retry") } else { - m.logger.Error(err, "Failed to process FederatedCluster") + logger.Error(err, "Failed to process FederatedCluster") } } if needReenqueue { - m.queue.Add(key) + m.queue.AddRateLimited(key) } } @@ -253,10 +255,6 @@ func (m *federatedInformerManager) HasSynced() bool { } func (m *federatedInformerManager) Start(ctx context.Context) { - if !cache.WaitForNamedCacheSync("federated-informer-manager", ctx.Done(), m.HasSynced) { - return - } - m.lock.Lock() defer m.lock.Unlock() @@ -267,6 +265,10 @@ func (m *federatedInformerManager) Start(ctx context.Context) { m.started = true + if !cache.WaitForNamedCacheSync("federated-informer-manager", ctx.Done(), m.HasSynced) { + return + } + for _, handler := range m.clusterEventHandler { predicate := handler.Predicate callback := handler.Callback diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index ab54a99b..ee0e572c 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -40,7 +40,7 @@ type informerManager struct { eventHandlerRegistrations map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration lastAppliedFTCsCache map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig - queue workqueue.Interface + queue workqueue.RateLimitingInterface logger klog.Logger } @@ -85,37 +85,39 @@ func (m *informerManager) worker() { } defer m.queue.Done(key) + logger := m.logger.WithValues("key", key) + _, name, err := cache.SplitMetaNamespaceKey(key.(string)) if err != nil { - m.logger.Error(err, "Failed to process FederatedTypeConfig") + logger.Error(err, "Failed to process FederatedTypeConfig") return } ftc, err := m.ftcInformer.Lister().Get(name) if apierrors.IsNotFound(err) { if err := m.processFTCDeletion(name); err != nil { - m.logger.Error(err, "Failed to process FederatedTypeConfig, will retry") - m.queue.Add(key) + logger.Error(err, "Failed to process FederatedTypeConfig, will retry") + m.queue.AddRateLimited(key) return } return } if err != nil { - m.logger.Error(err, "Failed to process FederatedTypeConfig, will retry") - m.queue.Add(key) + logger.Error(err, "Failed to get FederatedTypeConfig from lister, will retry") + m.queue.AddRateLimited(key) return } err, needReenqueue := m.processFTC(ftc) if err != nil { if needReenqueue { - m.logger.Error(err, "Failed to process FederatedTypeConfig, will retry") + logger.Error(err, "Failed to process FederatedTypeConfig, will retry") } else { - m.logger.Error(err, "Failed to process FederatedTypeConfig") + logger.Error(err, "Failed to process FederatedTypeConfig") } 
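// A minimal sketch (not part of the patch itself) of the worker pattern the
// Add -> AddRateLimited changes above move to: a failing key is retried with
// exponential backoff from the queue's rate limiter instead of being
// re-enqueued immediately.
func rateLimitedWorkerSketch(queue workqueue.RateLimitingInterface, syncKey func(key string) error) {
	key, shutdown := queue.Get()
	if shutdown {
		return
	}
	defer queue.Done(key)

	if err := syncKey(key.(string)); err != nil {
		queue.AddRateLimited(key) // retry later, with backoff
		return
	}
	// Forget resets a key's backoff after a successful sync; the hunks above
	// omit it and rely on the item-based rate limiter's behavior instead.
	queue.Forget(key)
}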
} if needReenqueue { - m.queue.Add(key) + m.queue.AddRateLimited(key) } } @@ -257,10 +259,6 @@ func (m *informerManager) HasSynced() bool { } func (m *informerManager) Start(ctx context.Context) { - if !cache.WaitForNamedCacheSync("informer-manager", ctx.Done(), m.HasSynced) { - return - } - m.lock.Lock() defer m.lock.Unlock() @@ -271,6 +269,10 @@ func (m *informerManager) Start(ctx context.Context) { m.started = true + if !cache.WaitForNamedCacheSync("informer-manager", ctx.Done(), m.HasSynced) { + return + } + go wait.Until(m.worker, 0, ctx.Done()) go func() { <-ctx.Done() diff --git a/pkg/util/informermanager/testutils_test.go b/pkg/util/informermanager/testutils_test.go index 238bcb14..c71897ba 100644 --- a/pkg/util/informermanager/testutils_test.go +++ b/pkg/util/informermanager/testutils_test.go @@ -20,7 +20,7 @@ import ( var ( daemonsetFTC = &fedcorev1a1.FederatedTypeConfig{ ObjectMeta: metav1.ObjectMeta{ - Name: "daemonsets", + Name: "daemonsets.apps", }, Spec: fedcorev1a1.FederatedTypeConfigSpec{ SourceType: fedcorev1a1.APIResource{ @@ -34,7 +34,7 @@ var ( } deploymentFTC = &fedcorev1a1.FederatedTypeConfig{ ObjectMeta: metav1.ObjectMeta{ - Name: "deployments", + Name: "deployments.apps", }, Spec: fedcorev1a1.FederatedTypeConfigSpec{ SourceType: fedcorev1a1.APIResource{ From d5330298ce00e7654bed3566370abce541e84cda Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 11 Jul 2023 17:47:29 +0800 Subject: [PATCH 018/173] propagate start contexts --- .../federatedinformermanager.go | 55 +++++++------ pkg/util/informermanager/informermanager.go | 77 +++++++++++-------- .../informermanager/informermanager_test.go | 36 ++++++--- 3 files changed, 104 insertions(+), 64 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 5dd30974..740d1342 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -38,8 +38,7 @@ type federatedInformerManager struct { informerManagers map[string]InformerManager informerManagersCancelFuncs map[string]context.CancelFunc - queue workqueue.RateLimitingInterface - logger klog.Logger + queue workqueue.RateLimitingInterface } func NewFederatedInformerManager( @@ -60,7 +59,6 @@ func NewFederatedInformerManager( informerManagers: map[string]InformerManager{}, informerManagersCancelFuncs: map[string]context.CancelFunc{}, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), - logger: klog.LoggerWithName(klog.Background(), "federated-informer-manager"), } clusterInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ @@ -81,20 +79,20 @@ func NewFederatedInformerManager( func (m *federatedInformerManager) enqueue(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - m.logger.Error(err, "Failed to enqueue FederatedCluster") + klog.Error(err, "federated-informer-manager: Failed to enqueue FederatedCluster") return } m.queue.Add(key) } -func (m *federatedInformerManager) worker() { +func (m *federatedInformerManager) worker(ctx context.Context) { key, shutdown := m.queue.Get() if shutdown { return } defer m.queue.Done(key) - logger := m.logger.WithValues("key", key) + logger := klog.FromContext(ctx) _, name, err := cache.SplitMetaNamespaceKey(key.(string)) if err != nil { @@ -102,6 +100,9 @@ func (m *federatedInformerManager) worker() { return } + logger = logger.WithValues("cluster", name) + ctx = klog.NewContext(ctx, logger) + 
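// A minimal sketch (not part of the patch itself) of the contextual-logging
// idiom the hunks here adopt from k8s.io/klog/v2: values are attached to the
// logger once, stored back into the context, and recovered by callees without
// any extra plumbing.
func contextualLoggingSketch(ctx context.Context, name string) context.Context {
	logger := klog.FromContext(ctx).WithValues("cluster", name)
	ctx = klog.NewContext(ctx, logger)
	klog.FromContext(ctx).V(2).Info("Processing FederatedCluster") // logs with cluster=<name>
	return ctx
}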
cluster, err := m.clusterInformer.Lister().Get(name) if err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to get FederatedCluster from lister, will retry") @@ -109,7 +110,7 @@ func (m *federatedInformerManager) worker() { return } if apierrors.IsNotFound(err) || !util.IsClusterJoined(&cluster.Status) { - if err := m.processClusterDeletion(name); err != nil { + if err := m.processClusterDeletion(ctx, name); err != nil { logger.Error(err, "Failed to process FederatedCluster, will retry") m.queue.AddRateLimited(key) return @@ -117,7 +118,7 @@ func (m *federatedInformerManager) worker() { return } - err, needReenqueue := m.processCluster(cluster) + err, needReenqueue := m.processCluster(ctx, cluster) if err != nil { if needReenqueue { logger.Error(err, "Failed to process FederatedCluster, will retry") @@ -130,7 +131,10 @@ func (m *federatedInformerManager) worker() { } } -func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.FederatedCluster) (err error, needReenqueue bool) { +func (m *federatedInformerManager) processCluster( + ctx context.Context, + cluster *fedcorev1a1.FederatedCluster, +) (err error, needReenqueue bool) { m.lock.Lock() defer m.lock.Unlock() @@ -146,7 +150,7 @@ func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.Federated // period of time and we missed processing the deletion. We simply process the cluster deletion and // reenqueue. // Note: updating of cluster connetion details, however, is still not a supported use case. - err := m.processClusterDeletionUnlocked(clusterName) + err := m.processClusterDeletionUnlocked(ctx, clusterName) return err, true } } else { @@ -156,8 +160,8 @@ func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.Federated } manager := NewInformerManager(clusterClient, m.ftcInformer) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) for _, generator := range m.eventHandlerGenerators { if err := manager.AddEventHandlerGenerator(generator); err != nil { cancel() @@ -165,6 +169,8 @@ func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.Federated } } + klog.FromContext(ctx).V(2).Info("Starting new InformerManager for FederatedCluster") + manager.Start(ctx) m.connectionMap[clusterName] = connectionHash @@ -176,17 +182,18 @@ func (m *federatedInformerManager) processCluster(cluster *fedcorev1a1.Federated return nil, false } -func (m *federatedInformerManager) processClusterDeletion(clusterName string) error { +func (m *federatedInformerManager) processClusterDeletion(ctx context.Context, clusterName string) error { m.lock.Lock() m.lock.Unlock() - return m.processClusterDeletionUnlocked(clusterName) + return m.processClusterDeletionUnlocked(ctx, clusterName) } -func (m *federatedInformerManager) processClusterDeletionUnlocked(clusterName string) error { +func (m *federatedInformerManager) processClusterDeletionUnlocked(ctx context.Context, clusterName string) error { delete(m.connectionMap, clusterName) delete(m.clients, clusterName) if cancel, ok := m.informerManagersCancelFuncs[clusterName]; ok { + klog.FromContext(ctx).V(2).Info("Stopping InformerManager for FederatedCluster") cancel() } delete(m.informerManagers, clusterName) @@ -258,14 +265,18 @@ func (m *federatedInformerManager) Start(ctx context.Context) { m.lock.Lock() defer m.lock.Unlock() + logger := klog.LoggerWithName(klog.FromContext(ctx), "federated-informer-manager") + ctx = klog.NewContext(ctx, logger) + if m.started { - m.logger.Error(nil, 
"FederatedInformerManager cannot be started more than once") + logger.Error(nil, "FederatedInformerManager cannot be started more than once") return } m.started = true - if !cache.WaitForNamedCacheSync("federated-informer-manager", ctx.Done(), m.HasSynced) { + if !cache.WaitForCacheSync(ctx.Done(), m.HasSynced) { + logger.Error(nil, "Failed to wait for FederatedInformerManager cache sync") return } @@ -296,16 +307,12 @@ func (m *federatedInformerManager) Start(ctx context.Context) { }) } - go wait.Until(m.worker, 0, ctx.Done()) + go wait.UntilWithContext(ctx, m.worker, 0) go func() { <-ctx.Done() - m.queue.ShutDown() - m.lock.Lock() - defer m.lock.Unlock() - for _, cancelFunc := range m.informerManagersCancelFuncs { - cancelFunc() - } + logger.V(2).Info("Stopping FederatedInformerManager") + m.queue.ShutDown() }() } @@ -326,5 +333,5 @@ func DefaultClusterConnectionHash(cluster *fedcorev1a1.FederatedCluster) ([]byte if err := gob.NewEncoder(&b).Encode(hashObj); err != nil { return nil, err } - return b.Bytes(), nil + return b.Bytes(), nil } diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index ee0e572c..5e0ed5a1 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -36,12 +36,11 @@ type informerManager struct { gvrMapping *tools.BijectionMap[string, schema.GroupVersionResource] informers map[string]informers.GenericInformer - informerStopChs map[string]chan struct{} + informerCancelFuncs map[string]context.CancelFunc eventHandlerRegistrations map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration lastAppliedFTCsCache map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig - queue workqueue.RateLimitingInterface - logger klog.Logger + queue workqueue.RateLimitingInterface } func NewInformerManager(client dynamic.Interface, ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer) InformerManager { @@ -53,11 +52,10 @@ func NewInformerManager(client dynamic.Interface, ftcInformer fedcorev1a1informe eventHandlerGenerators: []*EventHandlerGenerator{}, gvrMapping: tools.NewBijectionMap[string, schema.GroupVersionResource](), informers: map[string]informers.GenericInformer{}, - informerStopChs: map[string]chan struct{}{}, + informerCancelFuncs: map[string]context.CancelFunc{}, eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, lastAppliedFTCsCache: map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{}, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), - logger: klog.LoggerWithName(klog.Background(), "informer-manager"), } ftcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -72,20 +70,20 @@ func NewInformerManager(client dynamic.Interface, ftcInformer fedcorev1a1informe func (m *informerManager) enqueue(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - m.logger.Error(err, "Failed to enqueue FederatedTypeConfig") + klog.Error(err, "informer-manager: Failed to enqueue FederatedTypeConfig") return } m.queue.Add(key) } -func (m *informerManager) worker() { +func (m *informerManager) worker(ctx context.Context) { key, shutdown := m.queue.Get() if shutdown { return } defer m.queue.Done(key) - logger := m.logger.WithValues("key", key) + logger := klog.FromContext(ctx) _, name, err := cache.SplitMetaNamespaceKey(key.(string)) if err != nil { @@ -93,9 +91,12 @@ func (m *informerManager) 
worker() { return } + logger = logger.WithValues("ftc", name) + ctx = klog.NewContext(ctx, logger) + ftc, err := m.ftcInformer.Lister().Get(name) if apierrors.IsNotFound(err) { - if err := m.processFTCDeletion(name); err != nil { + if err := m.processFTCDeletion(ctx, name); err != nil { logger.Error(err, "Failed to process FederatedTypeConfig, will retry") m.queue.AddRateLimited(key) return @@ -108,7 +109,7 @@ func (m *informerManager) worker() { return } - err, needReenqueue := m.processFTC(ftc) + err, needReenqueue := m.processFTC(ctx, ftc) if err != nil { if needReenqueue { logger.Error(err, "Failed to process FederatedTypeConfig, will retry") @@ -121,7 +122,7 @@ func (m *informerManager) worker() { } } -func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err error, needReenqueue bool) { +func (m *informerManager) processFTC(ctx context.Context, ftc *fedcorev1a1.FederatedTypeConfig) (err error, needReenqueue bool) { m.lock.Lock() defer m.lock.Unlock() @@ -129,14 +130,20 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err apiResource := ftc.GetSourceType() gvr := schemautil.APIResourceToGVR(&apiResource) + logger := klog.FromContext(ctx).WithValues("gvr", gvr.String()) + ctx = klog.NewContext(ctx, logger) + var informer informers.GenericInformer if oldGVR, exists := m.gvrMapping.Lookup(ftcName); exists { + logger = klog.FromContext(ctx).WithValues("old-gvr", oldGVR.String()) + ctx = klog.NewContext(ctx, logger) + if oldGVR != gvr { // This might occur if a ftc was deleted and recreated with a different source type within a short period of // time and we missed processing the deletion. We simply process the ftc deletion and reenqueue. Note: // updating of ftc source types, however, is still not a supported use case. 
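// A minimal sketch (not part of the patch itself) of the lifecycle change made
// further down in this hunk: per-informer stop channels are replaced with
// contexts derived from the manager's Start context, so cancelling the parent
// stops every informer while each stored CancelFunc can still stop a single
// FTC's informer.
func startInformerWithContextSketch(
	parent context.Context,
	informer informers.GenericInformer,
	informerCancelFuncs map[string]context.CancelFunc,
	ftcName string,
) {
	ctx, cancel := context.WithCancel(parent)
	go informer.Informer().Run(ctx.Done()) // exits when parent or this FTC's cancel fires
	informerCancelFuncs[ftcName] = cancel
}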
- err := m.processFTCDeletionUnlocked(ftcName) + err := m.processFTCDeletionUnlocked(ctx, ftcName) return err, true } @@ -147,6 +154,8 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err return fmt.Errorf("source type is already referenced by another FederatedTypeConfig: %w", err), false } + logger.V(2).Info("Starting new informer for FederatedTypeConfig") + informer = dynamicinformer.NewFilteredDynamicInformer( m.client, gvr, @@ -155,11 +164,11 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, nil, ) - stopCh := make(chan struct{}) - go informer.Informer().Run(stopCh) + ctx, cancel := context.WithCancel(ctx) + go informer.Informer().Run(ctx.Done()) m.informers[ftcName] = informer - m.informerStopChs[ftcName] = stopCh + m.informerCancelFuncs[ftcName] = cancel m.eventHandlerRegistrations[ftcName] = map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{} m.lastAppliedFTCsCache[ftcName] = map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{} } @@ -198,22 +207,28 @@ func (m *informerManager) processFTC(ftc *fedcorev1a1.FederatedTypeConfig) (err return nil, false } -func (m *informerManager) processFTCDeletion(ftcName string) error { +func (m *informerManager) processFTCDeletion(ctx context.Context, ftcName string) error { m.lock.Lock() defer m.lock.Unlock() - return m.processFTCDeletionUnlocked(ftcName) + if gvr, exists := m.gvrMapping.Lookup(ftcName); exists { + logger := klog.FromContext(ctx).WithValues("gvr", gvr.String()) + ctx = klog.NewContext(ctx, logger) + } + + return m.processFTCDeletionUnlocked(ctx, ftcName) } -func (m *informerManager) processFTCDeletionUnlocked(ftcName string) error { - if stopCh, ok := m.informerStopChs[ftcName]; ok { - close(stopCh) +func (m *informerManager) processFTCDeletionUnlocked(ctx context.Context, ftcName string) error { + if cancel, ok := m.informerCancelFuncs[ftcName]; ok { + klog.FromContext(ctx).V(2).Info("Stopping informer for FederatedTypeConfig") + cancel() } m.gvrMapping.Delete(ftcName) delete(m.informers, ftcName) - delete(m.informerStopChs, ftcName) + delete(m.informerCancelFuncs, ftcName) delete(m.eventHandlerRegistrations, ftcName) return nil @@ -262,27 +277,29 @@ func (m *informerManager) Start(ctx context.Context) { m.lock.Lock() defer m.lock.Unlock() + logger := klog.LoggerWithName(klog.FromContext(ctx), "informer-manager") + ctx = klog.NewContext(ctx, logger) + if m.started { - m.logger.Error(nil, "InformerManager cannot be started more than once") + logger.Error(nil, "InformerManager cannot be started more than once") return } + logger.V(2).Info("Starting InformerManager") + m.started = true - if !cache.WaitForNamedCacheSync("informer-manager", ctx.Done(), m.HasSynced) { + if !cache.WaitForCacheSync(ctx.Done(), m.HasSynced) { + logger.Error(nil, "Failed to wait for InformerManager cache sync") return } - go wait.Until(m.worker, 0, ctx.Done()) + go wait.UntilWithContext(ctx, m.worker, 0) go func() { <-ctx.Done() - m.queue.ShutDown() - m.lock.Lock() - defer m.lock.Unlock() - for _, stopCh := range m.informerStopChs { - close(stopCh) - } + logger.V(2).Info("Stopping InformerManager") + m.queue.ShutDown() }() } diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index ac6f4c0f..0082cbc0 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -2,6 +2,7 @@ package 
informermanager import ( "context" + "flag" "testing" "time" @@ -11,6 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" dynamicclient "k8s.io/client-go/dynamic" dynamicfake "k8s.io/client-go/dynamic/fake" @@ -24,6 +26,12 @@ import ( schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) +func init() { + flags := flag.NewFlagSet("", flag.ContinueOnError) + klog.InitFlags(flags) + flags.Set("v", "2") +} + func TestInformerManager(t *testing.T) { g := gomega.NewWithT(t) @@ -379,7 +387,9 @@ func TestInformerManager(t *testing.T) { // 4. Verify that handler is unregistered and new events are no longer received by handler. - dp2, err := dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + dp2, err := dynamicClient.Resource(common.DeploymentGVR). + Namespace("default"). + Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) dp2.SetAnnotations(map[string]string{"test": "test"}) @@ -414,7 +424,7 @@ func TestInformerManager(t *testing.T) { _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) // 2. Verify that handler is registered initially - + handler.ExpectGenerateEvents(1) handler.ExpectAddEvents(1) handler.AssertEventually(g, time.Second*2) @@ -437,7 +447,7 @@ func TestInformerManager(t *testing.T) { handler.AssertEventually(g, time.Second*2) }) - t.Run("event handler should be unchanged on FTC update", func(t * testing.T) { + t.Run("event handler should be unchanged on FTC update", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -459,7 +469,7 @@ func TestInformerManager(t *testing.T) { _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) // 2. Verify that handler is registered initially - + handler.ExpectGenerateEvents(1) handler.ExpectAddEvents(1) handler.AssertEventually(g, time.Second*2) @@ -482,7 +492,7 @@ func TestInformerManager(t *testing.T) { handler.AssertEventually(g, time.Second*2) }) - t.Run("event handler should be unregistered on FTC delete", func(t *testing.T){ + t.Run("event handler should be unregistered on FTC delete", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -510,7 +520,7 @@ func TestInformerManager(t *testing.T) { _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) // 2. Verify that handler1 and handler2 is registered initially for all FTCs - + handler1.ExpectGenerateEvents(3) handler1.ExpectAddEvents(3) handler1.AssertEventually(g, time.Second*2) @@ -528,7 +538,9 @@ func TestInformerManager(t *testing.T) { // 4. Verify that handler1 and handler2 is unregistered for deployments and no additional events are received - dp2, err := dynamicClient.Resource(common.DeploymentGVR). Namespace("default").Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + dp2, err := dynamicClient.Resource(common.DeploymentGVR). + Namespace("default"). + Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) dp2.SetAnnotations(map[string]string{"test": "test"}) @@ -543,7 +555,9 @@ func TestInformerManager(t *testing.T) { // 5. 
Verify that handler1 and handler2 is not unregistered for other FTCs. - _, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) + _, err = dynamicClient.Resource(common.SecretGVR). + Namespace("default"). + Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) handler1.ExpectAddEvents(1) handler2.ExpectAddEvents(1) @@ -587,7 +601,7 @@ func TestInformerManager(t *testing.T) { _, dynamicClient, _ := boostrapInformerManagerWithFakeClients(g, managerCtx, defaultFTCs, defaultObjs, generators) // 2. Verify that handler1 and handler2 is registered initially for all FTCs - + handler1.ExpectGenerateEvents(3) handler1.ExpectAddEvents(3) handler1.AssertEventually(g, time.Second*2) @@ -603,7 +617,9 @@ func TestInformerManager(t *testing.T) { // 4. Verify that handler1 and handler2 is unregistered for all FTCs and no more events are received - _, err := dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + _, err := dynamicClient.Resource(common.DeploymentGVR). + Namespace("default"). + Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) From 507bd3fe7e047757fd933893f1808ae510c59992 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 11 Jul 2023 18:10:39 +0800 Subject: [PATCH 019/173] refactor bijection package --- pkg/util/bijection/bijection.go | 93 +++++++++++++++++++++ pkg/util/informermanager/informermanager.go | 14 ++-- pkg/util/tools/bijection.go | 93 --------------------- 3 files changed, 100 insertions(+), 100 deletions(-) create mode 100644 pkg/util/bijection/bijection.go delete mode 100644 pkg/util/tools/bijection.go diff --git a/pkg/util/bijection/bijection.go b/pkg/util/bijection/bijection.go new file mode 100644 index 00000000..71efb696 --- /dev/null +++ b/pkg/util/bijection/bijection.go @@ -0,0 +1,93 @@ +package bijection + +import ( + "fmt" + "sync" +) + +func NewBijection[T1, T2 comparable]() *Bijection[T1, T2] { + return &Bijection[T1, T2]{ + lock: sync.RWMutex{}, + t1ToT2Map: map[T1]T2{}, + t2ToT1Map: map[T2]T1{}, + } +} + +type Bijection[T1, T2 comparable] struct { + lock sync.RWMutex + + t1ToT2Map map[T1]T2 + t2ToT1Map map[T2]T1 +} + +func (m *Bijection[T1, T2]) LookupByT1(key T1) (value T2, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + val, exists := m.t1ToT2Map[key] + if !exists { + return *new(T2), false + } + + return val, true +} + +func (m *Bijection[T1, T2]) LookupByT2(key T2) (value T1, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + val, exists := m.t2ToT1Map[key] + if !exists { + return *new(T1), false + } + + return val, true +} + +func (m *Bijection[T1, T2]) Add(t1 T1, t2 T2) error { + m.lock.Lock() + defer m.lock.Unlock() + + if val, ok := m.t1ToT2Map[t1]; ok { + return fmt.Errorf("%v is already mapped to %v", t1, val) + } + + if val, ok := m.t2ToT1Map[t2]; ok { + return fmt.Errorf("%v is already mapped to %v", t2, val) + } + + m.t1ToT2Map[t1] = t2 + m.t2ToT1Map[t2] = t1 + + return nil +} + +func (m *Bijection[T1, T2]) DeleteT1(key T1) bool { + m.lock.Lock() + defer m.lock.Unlock() + + val, ok := m.t1ToT2Map[key] + if !ok { + return false + } + + delete(m.t1ToT2Map, key) + delete(m.t2ToT1Map, val) + + return true +} + +func (m 
*Bijection[T1, T2]) DeleteT2(key T2) bool { + m.lock.Lock() + defer m.lock.Unlock() + + val, ok := m.t2ToT1Map[key] + if !ok { + return false + } + + delete(m.t2ToT1Map, key) + delete(m.t1ToT2Map, val) + + return true +} diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 5e0ed5a1..be631d35 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -20,7 +20,7 @@ import ( fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - "github.com/kubewharf/kubeadmiral/pkg/util/tools" + "github.com/kubewharf/kubeadmiral/pkg/util/bijection" ) type informerManager struct { @@ -33,7 +33,7 @@ type informerManager struct { eventHandlerGenerators []*EventHandlerGenerator - gvrMapping *tools.BijectionMap[string, schema.GroupVersionResource] + gvrMapping *bijection.Bijection[string, schema.GroupVersionResource] informers map[string]informers.GenericInformer informerCancelFuncs map[string]context.CancelFunc @@ -50,7 +50,7 @@ func NewInformerManager(client dynamic.Interface, ftcInformer fedcorev1a1informe client: client, ftcInformer: ftcInformer, eventHandlerGenerators: []*EventHandlerGenerator{}, - gvrMapping: tools.NewBijectionMap[string, schema.GroupVersionResource](), + gvrMapping: bijection.NewBijection[string, schema.GroupVersionResource](), informers: map[string]informers.GenericInformer{}, informerCancelFuncs: map[string]context.CancelFunc{}, eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, @@ -135,7 +135,7 @@ func (m *informerManager) processFTC(ctx context.Context, ftc *fedcorev1a1.Feder var informer informers.GenericInformer - if oldGVR, exists := m.gvrMapping.Lookup(ftcName); exists { + if oldGVR, exists := m.gvrMapping.LookupByT1(ftcName); exists { logger = klog.FromContext(ctx).WithValues("old-gvr", oldGVR.String()) ctx = klog.NewContext(ctx, logger) @@ -211,7 +211,7 @@ func (m *informerManager) processFTCDeletion(ctx context.Context, ftcName string m.lock.Lock() defer m.lock.Unlock() - if gvr, exists := m.gvrMapping.Lookup(ftcName); exists { + if gvr, exists := m.gvrMapping.LookupByT1(ftcName); exists { logger := klog.FromContext(ctx).WithValues("gvr", gvr.String()) ctx = klog.NewContext(ctx, logger) } @@ -225,7 +225,7 @@ func (m *informerManager) processFTCDeletionUnlocked(ctx context.Context, ftcNam cancel() } - m.gvrMapping.Delete(ftcName) + m.gvrMapping.DeleteT1(ftcName) delete(m.informers, ftcName) delete(m.informerCancelFuncs, ftcName) @@ -256,7 +256,7 @@ func (m *informerManager) GetResourceLister( m.lock.RLock() defer m.lock.RUnlock() - ftc, ok := m.gvrMapping.ReverseLookup(gvr) + ftc, ok := m.gvrMapping.LookupByT2(gvr) if !ok { return nil, nil, false } diff --git a/pkg/util/tools/bijection.go b/pkg/util/tools/bijection.go deleted file mode 100644 index f4ba099e..00000000 --- a/pkg/util/tools/bijection.go +++ /dev/null @@ -1,93 +0,0 @@ -package tools - -import ( - "fmt" - "sync" -) - -func NewBijectionMap[T1, T2 comparable]() *BijectionMap[T1, T2] { - return &BijectionMap[T1, T2]{ - lock: sync.RWMutex{}, - forwardMap: map[T1]T2{}, - reverseMap: map[T2]T1{}, - } -} - -type BijectionMap[T1, T2 comparable] struct { - lock sync.RWMutex - - forwardMap map[T1]T2 - reverseMap map[T2]T1 -} - -func (m *BijectionMap[T1, T2]) Lookup(key T1) 
(value T2, exists bool) { - m.lock.RLock() - defer m.lock.RUnlock() - - val, exists := m.forwardMap[key] - if !exists { - return *new(T2), false - } - - return val, true -} - -func (m *BijectionMap[T1, T2]) ReverseLookup(key T2) (value T1, exists bool) { - m.lock.RLock() - defer m.lock.RUnlock() - - val, exists := m.reverseMap[key] - if !exists { - return *new(T1), false - } - - return val, true -} - -func (m *BijectionMap[T1, T2]) Add(key1 T1, key2 T2) error { - m.lock.Lock() - defer m.lock.Unlock() - - if val, ok := m.forwardMap[key1]; ok { - return fmt.Errorf("%v is already mapped to %v", key1, val) - } - - if val, ok := m.reverseMap[key2]; ok { - return fmt.Errorf("%v is already mapped to %v", key2, val) - } - - m.forwardMap[key1] = key2 - m.reverseMap[key2] = key1 - - return nil -} - -func (m *BijectionMap[T1, T2]) Delete(key T1) bool { - m.lock.Lock() - defer m.lock.Unlock() - - val, ok := m.forwardMap[key] - if !ok { - return false - } - - delete(m.forwardMap, key) - delete(m.reverseMap, val) - - return true -} - -func (m *BijectionMap[T1, T2]) ReverseDelete(key T2) bool { - m.lock.Lock() - defer m.lock.Unlock() - - val, ok := m.reverseMap[key] - if !ok { - return false - } - - delete(m.reverseMap, key) - delete(m.forwardMap, val) - - return true -} From ab92ccd0c714a9021d468e67bb55bf72777e67c1 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 11 Jul 2023 23:20:01 +0800 Subject: [PATCH 020/173] improve countingResourceEventHandler --- .../federatedinformermanager_test.go | 64 +++++++++ .../informermanager/informermanager_test.go | 116 ++++++++------- pkg/util/informermanager/testutils_test.go | 134 +++++++++++++----- 3 files changed, 234 insertions(+), 80 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index d19b44c7..c0c6b0c6 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -23,6 +23,70 @@ import ( "github.com/onsi/gomega" ) +func TestFederatedInformerManager(t *testing.T) { + _ = gomega.NewWithT(t) + + t.Run("clients for existing clusters should be available eventually", func(t *testing.T) { + + }) + + t.Run("clients for new clusters should be available eventually", func(t *testing.T) { + + }) + + t.Run("listers for existing FTCs and clusters should be available eventually", func(t *testing.T) { + + }) + + t.Run("listers for new FTCs should be available eventually", func(t *testing.T) { + + }) + + t.Run("listers for new clusters should be available eventually", func(t *testing.T) { + + }) + + t.Run("event handlers for existing FTCs should be registered eventually", func(t *testing.T) { + + }) + + t.Run("event handlers for new FTCs should be registered eventually", func(t *testing.T) { + + }) + + t.Run("event handlers for new clusters should be registered eventually", func(t *testing.T) { + + }) + + t.Run("event handler should receive correct lastApplied and latest FTCs", func(t *testing.T) { + + }) + + t.Run("event handler should be registered on FTC update", func(t *testing.T) { + + }) + + t.Run("event handler should be unregistered on FTC update", func(t *testing.T) { + + }) + + t.Run("event handler should be re-registered on FTC update", func(t *testing.T) { + + }) + + t.Run("event handler should remain unchanged on FTC update", func(t *testing.T) { + + }) + + t.Run("event handler should be unregistered on FTC deletion", func(t *testing.T) { + + }) + + t.Run("event handler should be 
unregistered on cluster deletion", func(t *testing.T) { + + }) +} + // Verifies that clients for existing clusters are eventually available after the FederatedInformerManager is started. func TestFederatedInformerManagerClientAvailableForExistingClusters(t *testing.T) { g := gomega.NewGomegaWithT(t) diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 0082cbc0..d26e7320 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -117,8 +117,8 @@ func TestInformerManager(t *testing.T) { cm1 := getTestConfigMap("cm-1", "default") sc1 := getTestSecret("sc-1", "default") - alwaysRegistered := &countingResourceEventHandler{} - neverRegistered := &countingResourceEventHandler{} + alwaysRegistered := newCountingResourceEventHandler() + neverRegistered := newCountingResourceEventHandler() defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} defaultObjs := []*unstructured.Unstructured{dp1, cm1, sc1} @@ -137,8 +137,12 @@ func TestInformerManager(t *testing.T) { // 2. Verify alwaysRegistered is eventually registered for all existing FTCs. - alwaysRegistered.ExpectGenerateEvents(3) - alwaysRegistered.ExpectAddEvents(3) + alwaysRegistered.ExpectGenerateEvents(deploymentFTC.Name, 1) + alwaysRegistered.ExpectGenerateEvents(configmapFTC.Name, 1) + alwaysRegistered.ExpectGenerateEvents(secretFTC.Name, 1) + alwaysRegistered.ExpectAddEvents(deploymentGVK, 1) + alwaysRegistered.ExpectAddEvents(configmapGVK, 1) + alwaysRegistered.ExpectAddEvents(secretGVK, 1) alwaysRegistered.AssertEventually(g, time.Second*2) // 3. Verify newly generated events are received by alwaysRegistered @@ -147,16 +151,16 @@ func TestInformerManager(t *testing.T) { Namespace("default"). Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectAddEvents(1) + alwaysRegistered.ExpectAddEvents(secretGVK, 1) dp1.SetAnnotations(map[string]string{"test": "test"}) _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectUpdateEvents(1) + alwaysRegistered.ExpectUpdateEvents(deploymentGVK, 1) err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectDeleteEvents(1) + alwaysRegistered.ExpectDeleteEvents(configmapGVK, 1) alwaysRegistered.AssertEventually(g, time.Second*2) @@ -185,8 +189,8 @@ func TestInformerManager(t *testing.T) { dm3 := getTestDaemonSet("dm-3", "default") dm4 := getTestDaemonSet("dm-4", "default") - alwaysRegistered := &countingResourceEventHandler{} - neverRegistered := &countingResourceEventHandler{} + alwaysRegistered := newCountingResourceEventHandler() + neverRegistered := newCountingResourceEventHandler() defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} defaultObjs := []*unstructured.Unstructured{dm1, dm2, dm3, dm4} @@ -210,8 +214,8 @@ func TestInformerManager(t *testing.T) { // 3. Verify that alwaysRegistered is eventually registered for the new Daemonset FTC - alwaysRegistered.ExpectGenerateEvents(1) - alwaysRegistered.ExpectAddEvents(4) + alwaysRegistered.ExpectGenerateEvents(daemonsetFTC.Name, 1) + alwaysRegistered.ExpectAddEvents(daemonsetGVK, 4) alwaysRegistered.AssertEventually(g, time.Second*2) // 4. 
Verify that newly generated events are also received by alwaysRegistered @@ -219,11 +223,11 @@ func TestInformerManager(t *testing.T) { dm1.SetAnnotations(map[string]string{"test": "test"}) _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectUpdateEvents(1) + alwaysRegistered.ExpectUpdateEvents(daemonsetGVK, 1) err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectDeleteEvents(1) + alwaysRegistered.ExpectDeleteEvents(daemonsetGVK, 1) alwaysRegistered.AssertEventually(g, time.Second*2) @@ -307,7 +311,7 @@ func TestInformerManager(t *testing.T) { ftc := deploymentFTC.DeepCopy() ftc.SetAnnotations(map[string]string{"predicate": "false", "generator": "true"}) - handler := &countingResourceEventHandler{} + handler := newCountingResourceEventHandler() generator := newAnnotationBasedGenerator(handler) defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} @@ -328,8 +332,8 @@ func TestInformerManager(t *testing.T) { // 4. Verify that handler is registered and additional events are received - handler.ExpectGenerateEvents(1) - handler.ExpectAddEvents(1) + handler.ExpectGenerateEvents(ftc.Name, 1) + handler.ExpectAddEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) @@ -337,16 +341,16 @@ func TestInformerManager(t *testing.T) { Namespace("default"). Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectAddEvents(1) + handler.ExpectAddEvents(deploymentGVK, 1) dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectUpdateEvents(1) + handler.ExpectUpdateEvents(deploymentGVK, 1) err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectDeleteEvents(1) + handler.ExpectDeleteEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) }) @@ -362,7 +366,7 @@ func TestInformerManager(t *testing.T) { ftc := deploymentFTC.DeepCopy() ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) - handler := &countingResourceEventHandler{} + handler := newCountingResourceEventHandler() generator := newAnnotationBasedGenerator(handler) defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} @@ -373,8 +377,8 @@ func TestInformerManager(t *testing.T) { // 2. Verify that handler is registered initially. - handler.ExpectGenerateEvents(1) - handler.ExpectAddEvents(1) + handler.ExpectGenerateEvents(ftc.Name, 1) + handler.ExpectAddEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) // 3. Update FTC to trigger unregistration @@ -411,7 +415,7 @@ func TestInformerManager(t *testing.T) { dp1 := getTestDeployment("dp-1", "default") ftc := deploymentFTC.DeepCopy() - handler := &countingResourceEventHandler{} + handler := newCountingResourceEventHandler() generator := &EventHandlerGenerator{ Predicate: alwaysRegisterPredicate, Generator: handler.GenerateEventHandler, @@ -425,8 +429,8 @@ func TestInformerManager(t *testing.T) { // 2. 
Verify that handler is registered initially - handler.ExpectGenerateEvents(1) - handler.ExpectAddEvents(1) + handler.ExpectGenerateEvents(ftc.Name, 1) + handler.ExpectAddEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) // 3. Trigger FTC updates and verify re-registration @@ -435,15 +439,15 @@ func TestInformerManager(t *testing.T) { _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectGenerateEvents(1) - handler.ExpectAddEvents(1) + handler.ExpectGenerateEvents(ftc.Name, 1) + handler.ExpectAddEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) dp1.SetAnnotations(map[string]string{"test": "test"}) _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectUpdateEvents(1) + handler.ExpectUpdateEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) }) @@ -456,7 +460,7 @@ func TestInformerManager(t *testing.T) { dp1 := getTestDeployment("dp-1", "default") ftc := deploymentFTC.DeepCopy() - handler := &countingResourceEventHandler{} + handler := newCountingResourceEventHandler() generator := &EventHandlerGenerator{ Predicate: registerOncePredicate, Generator: handler.GenerateEventHandler, @@ -470,8 +474,8 @@ func TestInformerManager(t *testing.T) { // 2. Verify that handler is registered initially - handler.ExpectGenerateEvents(1) - handler.ExpectAddEvents(1) + handler.ExpectGenerateEvents(ftc.Name, 1) + handler.ExpectAddEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) // 3. Trigger FTC updates and verify no re-registration @@ -488,7 +492,7 @@ func TestInformerManager(t *testing.T) { _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectUpdateEvents(1) + handler.ExpectUpdateEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) }) @@ -502,8 +506,8 @@ func TestInformerManager(t *testing.T) { cm1 := getTestConfigMap("cm-1", "default") sc1 := getTestSecret("sc-1", "default") - handler1 := &countingResourceEventHandler{} - handler2 := &countingResourceEventHandler{} + handler1 := newCountingResourceEventHandler() + handler2 := newCountingResourceEventHandler() generator1 := &EventHandlerGenerator{ Predicate: registerOncePredicate, Generator: handler1.GenerateEventHandler, @@ -521,12 +525,20 @@ func TestInformerManager(t *testing.T) { // 2. Verify that handler1 and handler2 is registered initially for all FTCs - handler1.ExpectGenerateEvents(3) - handler1.ExpectAddEvents(3) + handler1.ExpectGenerateEvents(deploymentFTC.Name, 1) + handler1.ExpectGenerateEvents(configmapFTC.Name, 1) + handler1.ExpectGenerateEvents(secretFTC.Name, 1) + handler1.ExpectAddEvents(deploymentGVK, 1) + handler1.ExpectAddEvents(configmapGVK, 1) + handler1.ExpectAddEvents(secretGVK, 1) handler1.AssertEventually(g, time.Second*2) - handler2.ExpectGenerateEvents(3) - handler2.ExpectAddEvents(3) + handler2.ExpectGenerateEvents(deploymentFTC.Name, 1) + handler2.ExpectGenerateEvents(configmapFTC.Name, 1) + handler2.ExpectGenerateEvents(secretFTC.Name, 1) + handler2.ExpectAddEvents(deploymentGVK, 1) + handler2.ExpectAddEvents(configmapGVK, 1) + handler2.ExpectAddEvents(secretGVK, 1) handler2.AssertEventually(g, time.Second*2) // 3. 
Delete the deployment FTC @@ -559,14 +571,14 @@ func TestInformerManager(t *testing.T) { Namespace("default"). Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - handler1.ExpectAddEvents(1) - handler2.ExpectAddEvents(1) + handler1.ExpectAddEvents(secretGVK, 1) + handler2.ExpectAddEvents(secretGVK, 1) cm1.SetAnnotations(map[string]string{"test": "test"}) _, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Update(ctx, cm1, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - handler1.ExpectUpdateEvents(1) - handler2.ExpectUpdateEvents(1) + handler1.ExpectUpdateEvents(configmapGVK, 1) + handler2.ExpectUpdateEvents(configmapGVK, 1) handler1.AssertEventually(g, time.Second*2) handler2.AssertEventually(g, time.Second*2) @@ -582,8 +594,8 @@ func TestInformerManager(t *testing.T) { cm1 := getTestConfigMap("cm-1", "default") sc1 := getTestSecret("sc-1", "default") - handler1 := &countingResourceEventHandler{} - handler2 := &countingResourceEventHandler{} + handler1 := newCountingResourceEventHandler() + handler2 := newCountingResourceEventHandler() generator1 := &EventHandlerGenerator{ Predicate: registerOncePredicate, Generator: handler1.GenerateEventHandler, @@ -602,12 +614,20 @@ func TestInformerManager(t *testing.T) { // 2. Verify that handler1 and handler2 is registered initially for all FTCs - handler1.ExpectGenerateEvents(3) - handler1.ExpectAddEvents(3) + handler1.ExpectGenerateEvents(deploymentFTC.Name, 1) + handler1.ExpectGenerateEvents(configmapFTC.Name, 1) + handler1.ExpectGenerateEvents(secretFTC.Name, 1) + handler1.ExpectAddEvents(deploymentGVK, 1) + handler1.ExpectAddEvents(configmapGVK, 1) + handler1.ExpectAddEvents(secretGVK, 1) handler1.AssertEventually(g, time.Second*2) - handler2.ExpectGenerateEvents(3) - handler2.ExpectAddEvents(3) + handler2.ExpectGenerateEvents(deploymentFTC.Name, 1) + handler2.ExpectGenerateEvents(configmapFTC.Name, 1) + handler2.ExpectGenerateEvents(secretFTC.Name, 1) + handler2.ExpectAddEvents(deploymentGVK, 1) + handler2.ExpectAddEvents(configmapGVK, 1) + handler2.ExpectAddEvents(secretGVK, 1) handler2.AssertEventually(g, time.Second*2) // 3. 
Shutdown the manager diff --git a/pkg/util/informermanager/testutils_test.go b/pkg/util/informermanager/testutils_test.go index c71897ba..d9099f4a 100644 --- a/pkg/util/informermanager/testutils_test.go +++ b/pkg/util/informermanager/testutils_test.go @@ -1,6 +1,9 @@ package informermanager import ( + "fmt" + "path" + goruntime "runtime" "sync" "time" @@ -10,6 +13,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/tools/cache" @@ -76,6 +80,13 @@ var ( } ) +var ( + deploymentGVK = appsv1.SchemeGroupVersion.WithKind("Deployment") + daemonsetGVK = appsv1.SchemeGroupVersion.WithKind("DaemonSet") + configmapGVK = corev1.SchemeGroupVersion.WithKind("ConfigMap") + secretGVK = corev1.SchemeGroupVersion.WithKind("Secret") +) + func getTestDeployment(name, namespace string) *unstructured.Unstructured { dp := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ @@ -184,85 +195,144 @@ func getTestCluster(name string) *fedcorev1a1.FederatedCluster { } } +func newCountingResourceEventHandler() *countingResourceEventHandler { + return &countingResourceEventHandler{ + lock: sync.RWMutex{}, + generateCount: map[string]int{}, + addEventCount: map[schema.GroupVersionKind]int{}, + updateEventCount: map[schema.GroupVersionKind]int{}, + deleteEventCount: map[schema.GroupVersionKind]int{}, + expectedGenerateCount: map[string]int{}, + expectedAddEventCount: map[schema.GroupVersionKind]int{}, + expectedUpdateEventCount: map[schema.GroupVersionKind]int{}, + expectedDeleteEventCount: map[schema.GroupVersionKind]int{}, + } +} + type countingResourceEventHandler struct { lock sync.RWMutex - generateCount int - addEventCount int - updateEventCount int - deleteEventCount int + generateCount map[string]int + addEventCount map[schema.GroupVersionKind]int + updateEventCount map[schema.GroupVersionKind]int + deleteEventCount map[schema.GroupVersionKind]int - expectedGenerateCount int - expectedAddEventCount int - expectedUpdateEventCount int - expectedDeleteEventCount int + expectedGenerateCount map[string]int + expectedAddEventCount map[schema.GroupVersionKind]int + expectedUpdateEventCount map[schema.GroupVersionKind]int + expectedDeleteEventCount map[schema.GroupVersionKind]int } -func (h *countingResourceEventHandler) ExpectGenerateEvents(i int) { +func (h *countingResourceEventHandler) ExpectGenerateEvents(ftcName string, n int) { h.lock.Lock() defer h.lock.Unlock() - h.expectedGenerateCount += i + h.expectedGenerateCount[ftcName] = h.expectedGenerateCount[ftcName] + n } -func (h *countingResourceEventHandler) ExpectAddEvents(i int) { +func (h *countingResourceEventHandler) ExpectAddEvents(gvk schema.GroupVersionKind, n int) { h.lock.Lock() defer h.lock.Unlock() - h.expectedAddEventCount += i + h.expectedAddEventCount[gvk] = h.expectedAddEventCount[gvk] + n } -func (h *countingResourceEventHandler) ExpectUpdateEvents(i int) { +func (h *countingResourceEventHandler) ExpectUpdateEvents(gvk schema.GroupVersionKind, n int) { h.lock.Lock() defer h.lock.Unlock() - h.expectedUpdateEventCount += i + h.expectedUpdateEventCount[gvk] = h.expectedUpdateEventCount[gvk] + n } -func (h *countingResourceEventHandler) ExpectDeleteEvents(i int) { +func (h *countingResourceEventHandler) ExpectDeleteEvents(gvk schema.GroupVersionKind, n int) { h.lock.Lock() defer h.lock.Unlock() - h.expectedDeleteEventCount += i + h.expectedDeleteEventCount[gvk] = 
h.expectedDeleteEventCount[gvk] + n
 }
 
 func (h *countingResourceEventHandler) AssertEventually(g gomega.Gomega, timeout time.Duration) {
+    _, file, no, _ := goruntime.Caller(1)
+    callerInfo := fmt.Sprintf("%s:%d", path.Base(file), no)
+
 	g.Eventually(func(g gomega.Gomega) {
-		g.Expect(h.generateCount).To(gomega.BeNumerically("==", h.expectedGenerateCount))
-		g.Expect(h.addEventCount).To(gomega.BeNumerically("==", h.expectedAddEventCount))
-		g.Expect(h.updateEventCount).To(gomega.BeNumerically("==", h.expectedUpdateEventCount))
-		g.Expect(h.deleteEventCount).To(gomega.BeNumerically("==", h.expectedDeleteEventCount))
+        for ftc := range h.expectedGenerateCount {
+            g.Expect(h.generateCount[ftc]).
+                To(gomega.BeNumerically("==", h.expectedGenerateCount[ftc]), "%s: incorrect number of generate events for %s", callerInfo, ftc)
+        }
+        for gvk := range h.expectedAddEventCount {
+            g.Expect(h.addEventCount[gvk]).
+                To(gomega.BeNumerically("==", h.expectedAddEventCount[gvk]), "%s: incorrect number of add events for %s", callerInfo, gvk)
+        }
+        for gvk := range h.expectedUpdateEventCount {
+            g.Expect(h.updateEventCount[gvk]).
+                To(gomega.BeNumerically("==", h.expectedUpdateEventCount[gvk]), "%s: incorrect number of update events for %s", callerInfo, gvk)
+        }
+        for gvk := range h.expectedDeleteEventCount {
+            g.Expect(h.deleteEventCount[gvk]).
+                To(gomega.BeNumerically("==", h.expectedDeleteEventCount[gvk]), "%s: incorrect number of delete events for %s", callerInfo, gvk)
+        }
 	}).WithTimeout(timeout).Should(gomega.Succeed())
 }
 
 func (h *countingResourceEventHandler) AssertConsistently(g gomega.Gomega, timeout time.Duration) {
+    _, file, no, _ := goruntime.Caller(1)
+    callerInfo := fmt.Sprintf("%s:%d", path.Base(file), no)
+
 	g.Consistently(func(g gomega.Gomega) {
-		g.Expect(h.generateCount).To(gomega.BeNumerically("==", h.expectedGenerateCount))
-		g.Expect(h.addEventCount).To(gomega.BeNumerically("==", h.expectedAddEventCount))
-		g.Expect(h.updateEventCount).To(gomega.BeNumerically("==", h.expectedUpdateEventCount))
-		g.Expect(h.deleteEventCount).To(gomega.BeNumerically("==", h.expectedDeleteEventCount))
+        for ftc := range h.expectedGenerateCount {
+            g.Expect(h.generateCount[ftc]).
+                To(gomega.BeNumerically("==", h.expectedGenerateCount[ftc]), "%s: incorrect number of generate events for %s", callerInfo, ftc)
+        }
+        for gvk := range h.expectedAddEventCount {
+            g.Expect(h.addEventCount[gvk]).
+                To(gomega.BeNumerically("==", h.expectedAddEventCount[gvk]), "%s: incorrect number of add events for %s", callerInfo, gvk)
+        }
+        for gvk := range h.expectedUpdateEventCount {
+            g.Expect(h.updateEventCount[gvk]).
+                To(gomega.BeNumerically("==", h.expectedUpdateEventCount[gvk]), "%s: incorrect number of update events for %s", callerInfo, gvk)
+        }
+        for gvk := range h.expectedDeleteEventCount {
+            g.Expect(h.deleteEventCount[gvk]).
+ To(gomega.BeNumerically("==", h.expectedDeleteEventCount[gvk]), "%s: incorrect number of delete events for %s", callerInfo, gvk) + } }).WithTimeout(timeout).Should(gomega.Succeed()) } -func (h *countingResourceEventHandler) GenerateEventHandler(_ *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { +func (h *countingResourceEventHandler) GenerateEventHandler(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { h.lock.Lock() defer h.lock.Unlock() - h.generateCount++ + h.generateCount[ftc.Name] = h.generateCount[ftc.Name] + 1 return h } -func (h *countingResourceEventHandler) OnAdd(_ interface{}) { +func (h *countingResourceEventHandler) OnAdd(obj interface{}) { h.lock.Lock() defer h.lock.Unlock() - h.addEventCount++ + + gvk := h.mustParseObject(obj) + h.addEventCount[gvk] = h.addEventCount[gvk] + 1 } -func (h *countingResourceEventHandler) OnDelete(_ interface{}) { +func (h *countingResourceEventHandler) OnDelete(obj interface{}) { h.lock.Lock() defer h.lock.Unlock() - h.deleteEventCount++ + + gvk := h.mustParseObject(obj) + h.deleteEventCount[gvk] = h.deleteEventCount[gvk] + 1 } -func (h *countingResourceEventHandler) OnUpdate(_ interface{}, _ interface{}) { +func (h *countingResourceEventHandler) OnUpdate(_ interface{}, obj interface{}) { h.lock.Lock() defer h.lock.Unlock() - h.updateEventCount++ + + gvk := h.mustParseObject(obj) + h.updateEventCount[gvk] = h.updateEventCount[gvk] + 1 +} + +func (h *countingResourceEventHandler) mustParseObject(obj interface{}) schema.GroupVersionKind { + uns := obj.(*unstructured.Unstructured) + gv, err := schema.ParseGroupVersion(uns.GetAPIVersion()) + if err != nil { + panic(fmt.Errorf("failed to parse GroupVersion from unstructured: %w", err)) + } + return gv.WithKind(uns.GetKind()) } var _ cache.ResourceEventHandler = &countingResourceEventHandler{} From 3e4945dee53e0b5e71bbcfd70610b743973cb416 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 03:50:41 +0800 Subject: [PATCH 021/173] finish federatedinformermanager tests --- .../federatedinformermanager.go | 10 +- .../federatedinformermanager_test.go | 1262 ++++++++++++++--- .../informermanager/informermanager_test.go | 62 +- 3 files changed, 1140 insertions(+), 194 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 740d1342..9b0336db 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -31,7 +31,7 @@ type federatedInformerManager struct { clusterInformer fedcorev1a1informers.FederatedClusterInformer eventHandlerGenerators []*EventHandlerGenerator - clusterEventHandler []*ClusterEventHandler + clusterEventHandlers []*ClusterEventHandler clients map[string]dynamic.Interface connectionMap map[string][]byte @@ -53,7 +53,7 @@ func NewFederatedInformerManager( ftcInformer: ftcInformer, clusterInformer: clusterInformer, eventHandlerGenerators: []*EventHandlerGenerator{}, - clusterEventHandler: []*ClusterEventHandler{}, + clusterEventHandlers: []*ClusterEventHandler{}, clients: map[string]dynamic.Interface{}, connectionMap: map[string][]byte{}, informerManagers: map[string]InformerManager{}, @@ -73,6 +73,8 @@ func NewFederatedInformerManager( }, }) + ftcInformer.Informer() + return manager } @@ -210,7 +212,7 @@ func (m *federatedInformerManager) AddClusterEventHandler(handler *ClusterEventH return fmt.Errorf("FederatedInformerManager is already started.") } - m.clusterEventHandler = 
append(m.clusterEventHandler, handler)
+    m.clusterEventHandlers = append(m.clusterEventHandlers, handler)
 
 	return nil
 }
@@ -280,7 +282,7 @@ func (m *federatedInformerManager) Start(ctx context.Context) {
 		return
 	}
 
-	for _, handler := range m.clusterEventHandler {
+    for _, handler := range m.clusterEventHandlers {
 		predicate := handler.Predicate
 		callback := handler.Callback
 
diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go
index c0c6b0c6..bf1b1ff8 100644
--- a/pkg/util/informermanager/federatedinformermanager_test.go
+++ b/pkg/util/informermanager/federatedinformermanager_test.go
@@ -24,307 +24,1196 @@ import (
 )
 
 func TestFederatedInformerManager(t *testing.T) {
-	_ = gomega.NewWithT(t)
-
 	t.Run("clients for existing clusters should be available eventually", func(t *testing.T) {
+        t.Parallel()
+
+        g := gomega.NewWithT(t)
+        ctx, cancel := context.WithCancel(context.Background())
+        defer cancel()
+
+        // 1. Bootstrap environment
+
+        defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{}
+        defaultObjs := map[string][]*unstructured.Unstructured{}
+        defaultClusters := []*fedcorev1a1.FederatedCluster{
+            getTestCluster("cluster-1"),
+            getTestCluster("cluster-2"),
+            getTestCluster("cluster-3"),
+        }
+        generators := []*EventHandlerGenerator{}
+        clusterHandlers := []*ClusterEventHandler{}
+        manager, _ := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers)
+
+        // 2. Verify that the client for each cluster is eventually available
+
+        for _, cluster := range defaultClusters {
+            g.Eventually(func(g gomega.Gomega) {
+                client, exists := manager.GetClusterClient(cluster.Name)
+                g.Expect(exists).To(gomega.BeTrue())
+                g.Expect(client).ToNot(gomega.BeNil())
+            }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+        }
+
+        // 3. Verify that the client for a non-existent cluster is not available
+        client, exists := manager.GetClusterClient("cluster-4")
+        g.Expect(exists).To(gomega.BeFalse())
+        g.Expect(client).To(gomega.BeNil())
 	})
 
 	t.Run("clients for new clusters should be available eventually", func(t *testing.T) {
+        t.Parallel()
+
+        g := gomega.NewWithT(t)
+        ctx, cancel := context.WithCancel(context.Background())
+        defer cancel()
+
+        // 1. Bootstrap environment
+
+        defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{}
+        defaultObjs := map[string][]*unstructured.Unstructured{}
+        defaultClusters := []*fedcorev1a1.FederatedCluster{}
+        generators := []*EventHandlerGenerator{}
+        clusterHandlers := []*ClusterEventHandler{}
+        manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers)
+
+        // 2. Verify that the client for cluster-1 is not available initially.
+
+        g.Consistently(func(g gomega.Gomega) {
+            client, exists := manager.GetClusterClient("cluster-1")
+            g.Expect(exists).To(gomega.BeFalse())
+            g.Expect(client).To(gomega.BeNil())
+        }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+
+        // 3. Create a new cluster
+
+        cluster, err := fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, getTestCluster("cluster-1"), metav1.CreateOptions{})
+        g.Expect(err).ToNot(gomega.HaveOccurred())
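+
+        // The manager discovers member clusters through its FederatedCluster
+        // informer, so the client for a newly created cluster only becomes
+        // available asynchronously; the polling below mirrors what a real
+        // consumer would do. A push-style consumer could presumably register a
+        // ClusterEventHandler instead (its Predicate and Callback fields are
+        // consumed in Start above) via AddClusterEventHandler before the
+        // manager is started.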
+        // 4. Verify that the client for the new cluster is eventually available
+
+        g.Eventually(func(g gomega.Gomega) {
+            client, exists := manager.GetClusterClient(cluster.Name)
+            g.Expect(exists).To(gomega.BeTrue())
+            g.Expect(client).ToNot(gomega.BeNil())
+        }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 	})
 
 	t.Run("listers for existing FTCs and clusters should be available eventually", func(t *testing.T) {
+        t.Parallel()
+
+        g := gomega.NewWithT(t)
+        ctx, cancel := context.WithCancel(context.Background())
+        defer cancel()
+
+        // 1. Bootstrap environment
+
+        defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC}
+        defaultObjs := map[string][]*unstructured.Unstructured{}
+        defaultClusters := []*fedcorev1a1.FederatedCluster{
+            getTestCluster("cluster-1"),
+            getTestCluster("cluster-2"),
+            getTestCluster("cluster-3"),
+        }
+        generators := []*EventHandlerGenerator{}
+        clusterHandlers := []*ClusterEventHandler{}
+        manager, _ := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers)
+
+        // 2. Verify that listers for existing FTCs and clusters are eventually available
+
+        for _, ftc := range defaultFTCs {
+            apiresource := ftc.GetSourceType()
+            gvr := schemautil.APIResourceToGVR(&apiresource)
+
+            for _, cluster := range defaultClusters {
+                g.Eventually(func(g gomega.Gomega) {
+                    lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name)
+
+                    g.Expect(exists).To(gomega.BeTrue())
+                    g.Expect(lister).ToNot(gomega.BeNil())
+                    g.Expect(informerSynced()).To(gomega.BeTrue())
+                }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+            }
+        }
+
+        // 3. Verify that listers for non-existent FTCs or clusters are not available
+
+        lister, informerSynced, exists := manager.GetResourceLister(common.DaemonSetGVR, "cluster-1")
+        g.Expect(exists).To(gomega.BeFalse())
+        g.Expect(lister).To(gomega.BeNil())
+        g.Expect(informerSynced).To(gomega.BeNil())
+
+        lister, informerSynced, exists = manager.GetResourceLister(common.DeploymentGVR, "cluster-4")
+        g.Expect(exists).To(gomega.BeFalse())
+        g.Expect(lister).To(gomega.BeNil())
+        g.Expect(informerSynced).To(gomega.BeNil())
+
+        lister, informerSynced, exists = manager.GetResourceLister(common.DaemonSetGVR, "cluster-4")
+        g.Expect(exists).To(gomega.BeFalse())
+        g.Expect(lister).To(gomega.BeNil())
+        g.Expect(informerSynced).To(gomega.BeNil())
 	})
 
 	t.Run("listers for new FTCs should be available eventually", func(t *testing.T) {
+        t.Parallel()
+
+        g := gomega.NewWithT(t)
+        ctx, cancel := context.WithCancel(context.Background())
+        defer cancel()
+
+        // 1. Bootstrap environment
+
+        defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{}
+        defaultObjs := map[string][]*unstructured.Unstructured{}
+        defaultClusters := []*fedcorev1a1.FederatedCluster{
+            getTestCluster("cluster-1"),
+            getTestCluster("cluster-2"),
+            getTestCluster("cluster-3"),
+        }
+        generators := []*EventHandlerGenerator{}
+        clusterHandlers := []*ClusterEventHandler{}
+        manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers)
+
+        ftc := daemonsetFTC
+        apiresource := ftc.GetSourceType()
+        gvr := schemautil.APIResourceToGVR(&apiresource)
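+
+        // GetResourceLister returns (lister, informerSynced, exists). As the
+        // assertions in this file rely on, exists is false and the other two
+        // return values are nil until the manager knows about both the FTC's
+        // source GVR and the cluster being queried.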
+        // 2. Verify that the daemonset lister is not available at the start
+
+        g.Consistently(func(g gomega.Gomega) {
+            for _, cluster := range defaultClusters {
+                lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name)
+                g.Expect(exists).To(gomega.BeFalse())
+                g.Expect(lister).To(gomega.BeNil())
+                g.Expect(informerSynced).To(gomega.BeNil())
+            }
+        }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+
+        // 3. Create the daemonset FTC
+
+        _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{})
+        g.Expect(err).ToNot(gomega.HaveOccurred())
+
+        // 4. Verify that the daemonset lister is eventually available
+
+        g.Eventually(func(g gomega.Gomega) {
+            for _, cluster := range defaultClusters {
+                lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name)
+                g.Expect(exists).To(gomega.BeTrue())
+                g.Expect(lister).ToNot(gomega.BeNil())
+                g.Expect(informerSynced()).To(gomega.BeTrue())
+            }
+        }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 	})
 
 	t.Run("listers for new clusters should be available eventually", func(t *testing.T) {
+        t.Parallel()
+
+        g := gomega.NewWithT(t)
+        ctx, cancel := context.WithCancel(context.Background())
+        defer cancel()
+
+        // 1. Bootstrap environment
+
+        defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC}
+        defaultObjs := map[string][]*unstructured.Unstructured{}
+        defaultClusters := []*fedcorev1a1.FederatedCluster{}
+        generators := []*EventHandlerGenerator{}
+        clusterHandlers := []*ClusterEventHandler{}
+        manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers)
+
+        cluster := getTestCluster("cluster-1")
+
+        // 2. Verify that listers for cluster-1 are not available at the start
+
+        g.Consistently(func(g gomega.Gomega) {
+            for _, ftc := range defaultFTCs {
+                apiresource := ftc.GetSourceType()
+                gvr := schemautil.APIResourceToGVR(&apiresource)
+
+                lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name)
+                g.Expect(exists).To(gomega.BeFalse())
+                g.Expect(lister).To(gomega.BeNil())
+                g.Expect(informerSynced).To(gomega.BeNil())
+            }
+        }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
+
+        // 3. Create cluster-1
+
+        _, err := fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, cluster, metav1.CreateOptions{})
+        g.Expect(err).ToNot(gomega.HaveOccurred())
+
+        // 4. Verify that listers for cluster-1 are eventually available
+
+        g.Eventually(func(g gomega.Gomega) {
+            for _, ftc := range defaultFTCs {
+                apiresource := ftc.GetSourceType()
+                gvr := schemautil.APIResourceToGVR(&apiresource)
+
+                lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name)
+                g.Expect(exists).To(gomega.BeTrue())
+                g.Expect(lister).ToNot(gomega.BeNil())
+                g.Expect(informerSynced()).To(gomega.BeTrue())
+            }
+        }).WithTimeout(time.Second * 2).Should(gomega.Succeed())
 	})
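+
+    // The subtests below exercise EventHandlerGenerator: Predicate decides,
+    // given the lastApplied and latest versions of an FTC, whether a handler
+    // must be (re)generated, and Generator produces the
+    // cache.ResourceEventHandler to register. A minimal always-on pairing, as
+    // used below, looks like:
+    //
+    //	generator := &EventHandlerGenerator{
+    //		Predicate: alwaysRegisterPredicate,
+    //		Generator: handler.GenerateEventHandler,
+    //	}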
 
 	t.Run("event handlers for existing FTCs and clusters should be registered eventually", func(t *testing.T) {
+        t.Parallel()
+
+        g := gomega.NewWithT(t)
+        ctx, cancel := context.WithCancel(context.Background())
+        defer cancel()
+
+        // 1. Bootstrap environment
+
+        dp1 := getTestDeployment("dp-1", "default")
+        cm1 := getTestConfigMap("cm-1", "default")
+        sc1 := getTestSecret("sc-1", "default")
+
+        alwaysRegistered := newCountingResourceEventHandler()
+        neverRegistered := newCountingResourceEventHandler()
+
+        defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC}
+        defaultObjs := map[string][]*unstructured.Unstructured{
+            "cluster-1": {dp1, cm1, sc1},
+            "cluster-2": {dp1, cm1, sc1},
+            "cluster-3": {dp1, cm1, sc1},
+        }
+        defaultClusters := []*fedcorev1a1.FederatedCluster{
+            getTestCluster("cluster-1"),
+            getTestCluster("cluster-2"),
+            getTestCluster("cluster-3"),
+        }
+        generators := []*EventHandlerGenerator{
+            {
+                Predicate: alwaysRegisterPredicate,
+                Generator: alwaysRegistered.GenerateEventHandler,
+            },
+            {
+                Predicate: neverRegisterPredicate,
+                Generator: neverRegistered.GenerateEventHandler,
+            },
+        }
+        clusterHandlers := []*ClusterEventHandler{}
+        manager, _ := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers)
+
+        // 2. Verify alwaysRegistered is eventually registered for all existing FTCs and clusters.
+
+        for range defaultClusters {
+            alwaysRegistered.ExpectGenerateEvents(deploymentFTC.Name, 1)
+            alwaysRegistered.ExpectGenerateEvents(configmapFTC.Name, 1)
+            alwaysRegistered.ExpectGenerateEvents(secretFTC.Name, 1)
+            alwaysRegistered.ExpectAddEvents(deploymentGVK, 1)
+            alwaysRegistered.ExpectAddEvents(configmapGVK, 1)
+            alwaysRegistered.ExpectAddEvents(secretGVK, 1)
+        }
+
+        alwaysRegistered.AssertEventually(g, time.Second*2)
+
+        // 3. Verify that newly generated events are received by alwaysRegistered.
+
+        dp1.SetAnnotations(map[string]string{"test": "test"})
+        for _, cluster := range defaultClusters {
+            dynamicClient, exists := manager.GetClusterClient(cluster.Name)
+            g.Expect(exists).To(gomega.BeTrue())
+
+            _, err := dynamicClient.Resource(common.SecretGVR).
+                Namespace("default").
+                Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{})
+            g.Expect(err).ToNot(gomega.HaveOccurred())
+            alwaysRegistered.ExpectAddEvents(secretGVK, 1)
+
+            _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{})
+            g.Expect(err).ToNot(gomega.HaveOccurred())
+            alwaysRegistered.ExpectUpdateEvents(deploymentGVK, 1)
+
+            err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{})
+            g.Expect(err).ToNot(gomega.HaveOccurred())
+            alwaysRegistered.ExpectDeleteEvents(configmapGVK, 1)
+        }
+
+        alwaysRegistered.AssertEventually(g, time.Second*1)
+
+        // 4. Verify that events for non-existent FTCs are not received
+
+        for _, cluster := range defaultClusters {
+            dynamicClient, exists := manager.GetClusterClient(cluster.Name)
+            g.Expect(exists).To(gomega.BeTrue())
+            _, err := dynamicClient.Resource(common.DaemonSetGVR).
+                Namespace("default").
+                Create(ctx, getTestDaemonSet("dm-1", "default"), metav1.CreateOptions{})
+            g.Expect(err).ToNot(gomega.HaveOccurred())
+        }
+
+        alwaysRegistered.AssertConsistently(g, time.Second*2)
+
+        // 5. Verify neverRegistered receives no events
+
+        neverRegistered.AssertConsistently(g, time.Second*2)
 	})
 
 	t.Run("event handlers for new FTCs should be registered eventually", func(t *testing.T) {
+        t.Parallel()
+
+        g := gomega.NewWithT(t)
+        ctx, cancel := context.WithCancel(context.Background())
+        defer cancel()
+
+        // 1.
Bootstrap environment + + dm1 := getTestDaemonSet("dm-1", "default") + dm2 := getTestDaemonSet("dm-2", "default") + dm3 := getTestDaemonSet("dm-3", "default") + dm4 := getTestDaemonSet("dm-4", "default") + + alwaysRegistered := newCountingResourceEventHandler() + neverRegistered := newCountingResourceEventHandler() + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} + defaultObjs := map[string][]*unstructured.Unstructured{ + "cluster-1": {dm1, dm2, dm3, dm4}, + "cluster-2": {dm1, dm2, dm3, dm4}, + "cluster-3": {dm1, dm2, dm3, dm4}, + } + defaultClusters := []*fedcorev1a1.FederatedCluster{ + getTestCluster("cluster-1"), + getTestCluster("cluster-2"), + getTestCluster("cluster-3"), + } + generators := []*EventHandlerGenerator{ + { + Predicate: alwaysRegisterPredicate, + Generator: alwaysRegistered.GenerateEventHandler, + }, + { + Predicate: neverRegisterPredicate, + Generator: neverRegistered.GenerateEventHandler, + }, + } + clusterHandlers := []*ClusterEventHandler{} + manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + // 2. Verify that alwaysRegistered is not registered initially for daemonset + + alwaysRegistered.AssertConsistently(g, time.Second*2) + + // 3. Create new FTC for daemonset + + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, daemonsetFTC, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 4. Verify that alwaysRegistered is eventually registered for the new Daemonset FTC + + for range defaultClusters { + alwaysRegistered.ExpectGenerateEvents(daemonsetFTC.Name, 1) + alwaysRegistered.ExpectAddEvents(daemonsetGVK, 4) + } + + alwaysRegistered.AssertEventually(g, time.Second*2) + + // 5. Verify that newly generated events are also received by alwaysRegistered + dm1.SetAnnotations(map[string]string{"test": "test"}) + + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + + _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectUpdateEvents(daemonsetGVK, 1) + + g.Expect(exists).To(gomega.BeTrue()) + err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectDeleteEvents(daemonsetGVK, 1) + } + + alwaysRegistered.AssertEventually(g, time.Second*2) + + // 6. Verify that events for non-existent FTCs are not received by alwaysRegistered + + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + _, err = dynamicClient.Resource(common.SecretGVR). + Namespace("default"). + Create(ctx, getTestSecret("sc-1", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } + alwaysRegistered.AssertConsistently(g, time.Second*2) + + // 7. Verify that unregisteredResourceEventHandler is not registered + + neverRegistered.AssertConsistently(g, time.Second*2) }) t.Run("event handlers for new clusters should be registered eventually", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // 1. 
Bootstrap environment + + dm1 := getTestDaemonSet("dm-1", "default") + dm2 := getTestDaemonSet("dm-2", "default") + dm3 := getTestDaemonSet("dm-3", "default") + dm4 := getTestDaemonSet("dm-4", "default") + + alwaysRegistered := newCountingResourceEventHandler() + neverRegistered := newCountingResourceEventHandler() + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{daemonsetFTC} + defaultObjs := map[string][]*unstructured.Unstructured{ + "cluster-1": {dm1, dm2, dm3, dm4}, + "cluster-2": {dm1, dm2, dm3, dm4}, + "cluster-3": {dm1, dm2, dm3, dm4}, + } + defaultClusters := []*fedcorev1a1.FederatedCluster{} + generators := []*EventHandlerGenerator{ + { + Predicate: alwaysRegisterPredicate, + Generator: alwaysRegistered.GenerateEventHandler, + }, + { + Predicate: neverRegisterPredicate, + Generator: neverRegistered.GenerateEventHandler, + }, + } + clusterHandlers := []*ClusterEventHandler{} + manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + // 2. Verify that alwaysRegistered is not registered initially since there are no clusters + + alwaysRegistered.AssertConsistently(g, time.Second*2) + + // 3. Create new clusters + + _, err := fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, getTestCluster("cluster-1"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, getTestCluster("cluster-2"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, getTestCluster("cluster-3"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 4. Verify that alwaysRegistered is eventually registered for the new Daemonset FTC + + for range defaultClusters { + alwaysRegistered.ExpectGenerateEvents(daemonsetFTC.Name, 3) + alwaysRegistered.ExpectAddEvents(daemonsetGVK, 4) + } + + alwaysRegistered.AssertEventually(g, time.Second*2) + + // 5. Verify that newly generated events are also received by alwaysRegistered + + dm1.SetAnnotations(map[string]string{"test": "test"}) + + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + + _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectUpdateEvents(daemonsetGVK, 1) + + g.Expect(exists).To(gomega.BeTrue()) + _, err = dynamicClient.Resource(common.DaemonSetGVR). + Namespace("default"). + Create(ctx, getTestDaemonSet("dm-5", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectAddEvents(daemonsetGVK, 1) + g.Expect(exists).To(gomega.BeTrue()) + err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + alwaysRegistered.ExpectDeleteEvents(daemonsetGVK, 1) + } + + alwaysRegistered.AssertEventually(g, time.Second*2) + + // 6. Verify that events for non-existent FTCs are not received by alwaysRegistered + + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + + _, err = dynamicClient.Resource(common.SecretGVR). + Namespace("default"). 
+ Create(ctx, getTestSecret("sc-1", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } + + alwaysRegistered.AssertConsistently(g, time.Second*2) + + // 7. Verify that unregisteredResourceEventHandler is not registered + + neverRegistered.AssertConsistently(g, time.Second*2) }) t.Run("event handler should receive correct lastApplied and latest FTCs", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // 1. Bootstrap environment + + var generation int64 = 1 + + // assertionCh is used to achieve 2 things: + // 1. It is used to pass assertions to the main goroutine. + // 2. It is used as an implicit lock to ensure FTC events are not squashed by the InformerManager. + assertionCh := make(chan func()) + + ftc := deploymentFTC.DeepCopy() + ftc.SetGeneration(generation) + + generator := &EventHandlerGenerator{ + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { + if generation == 1 { + assertionCh <- func() { + g.Expect(lastApplied).To(gomega.BeNil()) + } + } else { + assertionCh <- func() { + g.Expect(lastApplied.GetGeneration()).To(gomega.BeNumerically("==", generation-1)) + g.Expect(latest.GetGeneration()).To(gomega.BeNumerically("==", generation)) + } + } + + return true + }, + Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { return nil }, + } + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := map[string][]*unstructured.Unstructured{} + defaultClusters := []*fedcorev1a1.FederatedCluster{ + getTestCluster("cluster-1"), + getTestCluster("cluster-2"), + getTestCluster("cluster-3"), + } + generators := []*EventHandlerGenerator{generator} + clusterHandlers := []*ClusterEventHandler{} + _, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + for range defaultClusters { + fn := <-assertionCh + fn() + } + + // 3. Generate FTC update events + + for i := 0; i < 5; i++ { + generation++ + ftc.SetGeneration(generation) + + var err error + ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + for range defaultClusters { + fn := <-assertionCh + fn() + } + } }) t.Run("event handler should be registered on FTC update", func(t *testing.T) { + t.Parallel() - }) + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - t.Run("event handler should be unregistered on FTC update", func(t *testing.T) { + // 1. 
Bootstrap environment - }) + dp1 := getTestDeployment("dp-1", "default") - t.Run("event handler should be re-registered on FTC update", func(t *testing.T) { + ftc := deploymentFTC.DeepCopy() + ftc.SetAnnotations(map[string]string{"predicate": "false", "generator": "true"}) - }) + handler := newCountingResourceEventHandler() + generator := newAnnotationBasedGenerator(handler) - t.Run("event handler should remain unchanged on FTC update", func(t *testing.T) { + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := map[string][]*unstructured.Unstructured{ + "cluster-1": {dp1}, + "cluster-2": {dp1}, + "cluster-3": {dp1}, + } + defaultClusters := []*fedcorev1a1.FederatedCluster{ + getTestCluster("cluster-1"), + getTestCluster("cluster-2"), + getTestCluster("cluster-3"), + } + generators := []*EventHandlerGenerator{generator} + clusterHandlers := []*ClusterEventHandler{} + manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) - }) + // 2. Verify that handler is not registered initially. - t.Run("event handler should be unregistered on FTC deletion", func(t *testing.T) { + handler.AssertConsistently(g, time.Second*2) - }) + // 3. Update FTC to trigger registration + + ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) + ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 4. Verify that handler is registered and additional events are received + + handler.ExpectGenerateEvents(ftc.Name, len(defaultClusters)) + handler.ExpectAddEvents(deploymentGVK, len(defaultClusters)) + + handler.AssertEventually(g, time.Second*2) - t.Run("event handler should be unregistered on cluster deletion", func(t *testing.T) { + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + + dp2, err := dynamicClient.Resource(common.DeploymentGVR). + Namespace("default"). + Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectAddEvents(deploymentGVK, 1) + + dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) + dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectUpdateEvents(deploymentGVK, 1) + + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectDeleteEvents(deploymentGVK, 1) + } + handler.AssertEventually(g, time.Second*2) }) -} -// Verifies that clients for existing clusters are eventually available after the FederatedInformerManager is started. -func TestFederatedInformerManagerClientAvailableForExistingClusters(t *testing.T) { - g := gomega.NewGomegaWithT(t) + t.Run("event handler should be unregistered on FTC update", func(t *testing.T) { + t.Parallel() - // 1. Bootstrap an environment with 3 clusters. + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cluster1 := getTestCluster("cluster-1") - cluster2 := getTestCluster("cluster-2") - cluster3 := getTestCluster("cluster-3") + // 1. 
Bootstrap environment - defaultClusters := []*fedcorev1a1.FederatedCluster{cluster1, cluster2, cluster3} - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} - defaultObjects := map[string]*unstructured.Unstructured{} + dp1 := getTestDeployment("dp-1", "default") - manager, _, _ := boostrapFederatedInformerManagerWithFakeClients(defaultClusters, defaultFTCs, defaultObjects) + ftc := deploymentFTC.DeepCopy() + ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) - // 2. Start the manager + handler := newCountingResourceEventHandler() + generator := newAnnotationBasedGenerator(handler) - ctx := context.Background() - manager.Start(ctx) + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := map[string][]*unstructured.Unstructured{ + "cluster-1": {dp1}, + "cluster-2": {dp1}, + "cluster-3": {dp1}, + } + defaultClusters := []*fedcorev1a1.FederatedCluster{ + getTestCluster("cluster-1"), + getTestCluster("cluster-2"), + getTestCluster("cluster-3"), + } + generators := []*EventHandlerGenerator{generator} + clusterHandlers := []*ClusterEventHandler{} + manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for FederatedInformerManager cache sync") - } + // 2. Verify that handler is registered initially. - // 3. Verify that clients for the clusters are eventually available + handler.ExpectGenerateEvents(ftc.Name, len(defaultClusters)) + handler.ExpectAddEvents(deploymentGVK, len(defaultClusters)) + handler.AssertEventually(g, time.Second*2) - for _, cluster := range defaultClusters { - g.Eventually(func(g gomega.Gomega) { - client, exists := manager.GetClusterClient(cluster.Name) + // 3. Update FTC to trigger unregistration + + ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "false"}) + ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + <-time.After(time.Second) + + // 4. Verify that handler is unregistered and new events are no longer received by handler. + + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - g.Expect(client).ToNot(gomega.BeNil()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) - } - // 4. Sanity check: the client for a non-existent cluster should not be available + dp2, err := dynamicClient.Resource(common.DeploymentGVR). + Namespace("default"). + Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - client, exists := manager.GetClusterClient("cluster-4") - g.Expect(exists).To(gomega.BeFalse()) - g.Expect(client).To(gomega.BeNil()) -} + dp2.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) -// Verifies that clients for new clusters created after the FederatedInformerManager is started are eventually -// available. 
-func TestFederatedInformerManagerClientAvailableForNewCluster(t *testing.T) { - g := gomega.NewGomegaWithT(t) + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } - // 1. Bootstrap an environment with no initial clusters. + handler.AssertConsistently(g, time.Second*2) + }) - defaultClusters := []*fedcorev1a1.FederatedCluster{} - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} - defaultObjects := map[string]*unstructured.Unstructured{} + t.Run("event handler should be re-registered on FTC update", func(t *testing.T) { + t.Parallel() - manager, _, fedClient := boostrapFederatedInformerManagerWithFakeClients(defaultClusters, defaultFTCs, defaultObjects) + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // 2. Start the manager + // 1. Bootstrap environment - ctx := context.Background() - manager.Start(ctx) + dp1 := getTestDeployment("dp-1", "default") + ftc := deploymentFTC.DeepCopy() - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for FederatedInformerManager cache sync") - } + handler := newCountingResourceEventHandler() + generator := &EventHandlerGenerator{ + Predicate: alwaysRegisterPredicate, + Generator: handler.GenerateEventHandler, + } - // 3. Sanity check: the client for a non-existent cluster should not be available + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := map[string][]*unstructured.Unstructured{ + "cluster-1": {dp1}, + "cluster-2": {dp1}, + "cluster-3": {dp1}, + } + defaultClusters := []*fedcorev1a1.FederatedCluster{ + getTestCluster("cluster-1"), + getTestCluster("cluster-2"), + getTestCluster("cluster-3"), + } + generators := []*EventHandlerGenerator{generator} + clusterHandlers := []*ClusterEventHandler{} + manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) - client, exists := manager.GetClusterClient("cluster-1") - g.Expect(exists).To(gomega.BeFalse()) - g.Expect(client).To(gomega.BeNil()) + // 2. Verify that handler is registered initially - // 4. Create a new cluster + handler.ExpectGenerateEvents(ftc.Name, len(defaultClusters)) + handler.ExpectAddEvents(deploymentGVK, len(defaultClusters)) + handler.AssertEventually(g, time.Second*2) - cluster1 := getTestCluster("cluster-1") - fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, cluster1, metav1.CreateOptions{}) + // 3. Trigger FTC updates and verify re-registration + ftc.SetAnnotations(map[string]string{"test": "test"}) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - // 5. 
Verify that clients for the clusters are eventually available + handler.ExpectGenerateEvents(ftc.Name, len(defaultClusters)) + handler.ExpectAddEvents(deploymentGVK, len(defaultClusters)) + handler.AssertEventually(g, time.Second*2) - g.Eventually(func(g gomega.Gomega) { - client, exists := manager.GetClusterClient(cluster1.Name) - g.Expect(exists).To(gomega.BeTrue()) - g.Expect(client).ToNot(gomega.BeNil()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) -} + dp1.SetAnnotations(map[string]string{"test": "test"}) -// Verifies that the listers for the SourceType GVR of existing FTCs and member clusters are eventually available after -// the FederatedInformerManager is started. -func TestFederatedInformerManagerListerAvailableForExistingFTCsAndClusters(t *testing.T) { - g := gomega.NewGomegaWithT(t) + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectUpdateEvents(deploymentGVK, 1) + } - // 1. Bootstrap an environment with 3 clusters and FTCs for deployments, configmaps and secrets. + handler.AssertEventually(g, time.Second*2) + }) - cluster1 := getTestCluster("cluster-1") - cluster2 := getTestCluster("cluster-2") - cluster3 := getTestCluster("cluster-3") + t.Run("event handler should remain unchanged on FTC update", func(t *testing.T) { + t.Parallel() - defaultClusters := []*fedcorev1a1.FederatedCluster{cluster1, cluster2, cluster3} - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} - defaultObjects := map[string]*unstructured.Unstructured{} + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - manager, _, _ := boostrapFederatedInformerManagerWithFakeClients(defaultClusters, defaultFTCs, defaultObjects) + // 1. Bootstrap environment - // 2. Start the manager + dp1 := getTestDeployment("dp-1", "default") + ftc := deploymentFTC.DeepCopy() - ctx := context.Background() - manager.Start(ctx) + handler := newCountingResourceEventHandler() + generator := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler.GenerateEventHandler, + } - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for FederatedInformerManager cache sync") - } + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := map[string][]*unstructured.Unstructured{ + "cluster-1": {dp1}, + "cluster-2": {dp1}, + "cluster-3": {dp1}, + } + defaultClusters := []*fedcorev1a1.FederatedCluster{ + getTestCluster("cluster-1"), + getTestCluster("cluster-2"), + getTestCluster("cluster-3"), + } + generators := []*EventHandlerGenerator{generator} + clusterHandlers := []*ClusterEventHandler{} + manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) - // 3. Verify that listers for the clusters and each FTC's SourceType GVR are eventually available. + // 2. 
Verify that handler is registered initially - for _, cluster := range defaultClusters { - for _, ftc := range defaultFTCs { - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) + handler.ExpectGenerateEvents(ftc.Name, len(defaultClusters)) + handler.ExpectAddEvents(deploymentGVK, len(defaultClusters)) + handler.AssertEventually(g, time.Second*2) - g.Eventually(func(g gomega.Gomega) { - lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name) - g.Expect(exists).To(gomega.BeTrue()) - g.Expect(lister).ToNot(gomega.BeNil()) - g.Expect(informerSynced()).To(gomega.BeTrue()) - }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + // 3. Trigger FTC updates and verify no re-registration + + ftc.SetAnnotations(map[string]string{"test": "test"}) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + handler.AssertConsistently(g, time.Second*2) + + // 4. Verify events are still received by handler + + dp1.SetAnnotations(map[string]string{"test": "test"}) + + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler.ExpectUpdateEvents(deploymentGVK, 1) } - } - // 4. Sanity check: the listers for a non-existent cluster and ftc should not be available + handler.AssertEventually(g, time.Second*2) + }) - lister, informerSynced, exists := manager.GetResourceLister(common.DaemonSetGVR, defaultClusters[0].Name) - g.Expect(exists).To(gomega.BeFalse()) - g.Expect(lister).To(gomega.BeNil()) - g.Expect(informerSynced).To(gomega.BeNil()) + t.Run("event handler should be unregistered on FTC delete", func(t *testing.T) { + t.Parallel() - lister, informerSynced, exists = manager.GetResourceLister(common.DeploymentGVR, "cluster-4") - g.Expect(exists).To(gomega.BeFalse()) - g.Expect(lister).To(gomega.BeNil()) - g.Expect(informerSynced).To(gomega.BeNil()) -} + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -// Verifies that the lister for the SourceType GVR of a new FTC created after the FederatedInformerManager is started -// eventually becomes available. -func TestFederatedInformerManagerListerAvailableForNewFTC(t *testing.T) { - g := gomega.NewGomegaWithT(t) + // 1. Bootstrap environment - // 1. Bootstrap an environment with 3 clusters. 
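[Editor's note: illustrative sketch, not part of the patch. The registerOncePredicate referenced below is defined in the test helpers, which this hunk does not show. A minimal predicate consistent with how these tests use it would request handler generation only on an FTC's first reconcile, so that later FTC updates leave the existing handler registered unchanged:]

	func registerOncePredicate(lastApplied, _ *fedcorev1a1.FederatedTypeConfig) bool {
		// Generate (and register) an event handler only if none has been
		// applied for this FTC yet; returning false afterwards keeps the
		// previously generated handler in place.
		return lastApplied == nil
	}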
+ dp1 := getTestDeployment("dp-1", "default") + cm1 := getTestConfigMap("cm-1", "default") + sc1 := getTestSecret("sc-1", "default") - cluster1 := getTestCluster("cluster-1") - cluster2 := getTestCluster("cluster-2") - cluster3 := getTestCluster("cluster-3") + handler1 := newCountingResourceEventHandler() + handler2 := newCountingResourceEventHandler() + generator1 := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler1.GenerateEventHandler, + } + generator2 := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler2.GenerateEventHandler, + } - defaultClusters := []*fedcorev1a1.FederatedCluster{cluster1, cluster2, cluster3} - defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} - defaultObjects := map[string]*unstructured.Unstructured{} + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjs := map[string][]*unstructured.Unstructured{ + "cluster-1": {dp1, cm1, sc1}, + "cluster-2": {dp1, cm1, sc1}, + "cluster-3": {dp1, cm1, sc1}, + } + defaultClusters := []*fedcorev1a1.FederatedCluster{ + getTestCluster("cluster-1"), + getTestCluster("cluster-2"), + getTestCluster("cluster-3"), + } + generators := []*EventHandlerGenerator{generator1, generator2} + clusterHandlers := []*ClusterEventHandler{} + manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + // 2. Verify that handler1 and handler2 are registered initially for all FTCs + + for range defaultClusters { + handler1.ExpectGenerateEvents(deploymentFTC.Name, 1) + handler1.ExpectGenerateEvents(configmapFTC.Name, 1) + handler1.ExpectGenerateEvents(secretFTC.Name, 1) + handler1.ExpectAddEvents(deploymentGVK, 1) + handler1.ExpectAddEvents(configmapGVK, 1) + handler1.ExpectAddEvents(secretGVK, 1) + + handler2.ExpectGenerateEvents(deploymentFTC.Name, 1) + handler2.ExpectGenerateEvents(configmapFTC.Name, 1) + handler2.ExpectGenerateEvents(secretFTC.Name, 1) + handler2.ExpectAddEvents(deploymentGVK, 1) + handler2.ExpectAddEvents(configmapGVK, 1) + handler2.ExpectAddEvents(secretGVK, 1) + } - manager, _, _ := boostrapFederatedInformerManagerWithFakeClients(defaultClusters, defaultFTCs, defaultObjects) + handler1.AssertEventually(g, time.Second*2) + handler2.AssertEventually(g, time.Second*2) - // 2. Start the manager + // 3. Delete the deployment FTC - ctx := context.Background() - manager.Start(ctx) + err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Delete(ctx, deploymentFTC.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*3) - defer cancel() - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { - g.Fail("Timed out waiting for FederatedInformerManager cache sync") - } + <-time.After(time.Second) - // 3. Sanity check: -} + // 4. Verify that handler1 and handler2 are unregistered for deployments and no additional events are received -func TestFederatedInformerManagerListerAvailableForNewCluster(t *testing.T) { + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) -} + dp2, err := dynamicClient.Resource(common.DeploymentGVR). + Namespace("default").
+ Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) -func TestFederatedInformerManagerEventHandlerRegistrationForExistingFTCsAndClusters(t *testing.T) { + dp2.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) -} + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } -func TestFederatedInformerManagerEventHandlerRegistrationForNewFTC(t *testing.T) { + handler1.AssertConsistently(g, time.Second*2) + handler2.AssertConsistently(g, time.Second*2) -} + // 5. Verify that handler1 and handler2 are not unregistered for other FTCs. -func TestFederatedInformerManagerEventHandlerRegistrationOnFTCUpdate(t *testing.T) { + for _, cluster := range defaultClusters { + dynamicClient, exists := manager.GetClusterClient(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) -} + _, err = dynamicClient.Resource(common.SecretGVR). + Namespace("default"). + Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler1.ExpectAddEvents(secretGVK, 1) + handler2.ExpectAddEvents(secretGVK, 1) + + cm1.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Update(ctx, cm1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler1.ExpectUpdateEvents(configmapGVK, 1) + handler2.ExpectUpdateEvents(configmapGVK, 1) + } -func TestFederatedInformerManagerEventHandlerRegistrationOnFTCDelete(t *testing.T) { + handler1.AssertEventually(g, time.Second*2) + handler2.AssertEventually(g, time.Second*2) + }) -} + t.Run("event handler should be unregistered on cluster delete", func(t *testing.T) { + t.Parallel() -func TestFederatedInformerManagerEventHandlerRegistrationForNewCluster(t *testing.T) { + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -} + // 1. Bootstrap environment -func TestFederatedInformerManagerEventHandlerRegistrationOnClusterDelete(t *testing.T) { + dp1 := getTestDeployment("dp-1", "default") + cm1 := getTestConfigMap("cm-1", "default") + sc1 := getTestSecret("sc-1", "default") -} + handler1 := newCountingResourceEventHandler() + handler2 := newCountingResourceEventHandler() + generator1 := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler1.GenerateEventHandler, + } + generator2 := &EventHandlerGenerator{ + Predicate: registerOncePredicate, + Generator: handler2.GenerateEventHandler, + } -func TestFederatedInformerManagerClusterEventHandlerForExistingClusters(t *testing.T) { + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjs := map[string][]*unstructured.Unstructured{ + "cluster-1": {dp1, cm1, sc1}, + "cluster-2": {dp1, cm1, sc1}, + "cluster-3": {dp1, cm1, sc1}, + } + defaultClusters := []*fedcorev1a1.FederatedCluster{ + getTestCluster("cluster-1"), + getTestCluster("cluster-2"), + getTestCluster("cluster-3"), + } + generators := []*EventHandlerGenerator{generator1, generator2} + clusterHandlers := []*ClusterEventHandler{} + manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + // 2.
Verify that handler1 and handler2 are registered initially for all FTCs and clusters + + for range defaultClusters { + handler1.ExpectGenerateEvents(deploymentFTC.Name, 1) + handler1.ExpectGenerateEvents(configmapFTC.Name, 1) + handler1.ExpectGenerateEvents(secretFTC.Name, 1) + handler1.ExpectAddEvents(deploymentGVK, 1) + handler1.ExpectAddEvents(configmapGVK, 1) + handler1.ExpectAddEvents(secretGVK, 1) + + handler2.ExpectGenerateEvents(deploymentFTC.Name, 1) + handler2.ExpectGenerateEvents(configmapFTC.Name, 1) + handler2.ExpectGenerateEvents(secretFTC.Name, 1) + handler2.ExpectAddEvents(deploymentGVK, 1) + handler2.ExpectAddEvents(configmapGVK, 1) + handler2.ExpectAddEvents(secretGVK, 1) + } -} + handler1.AssertEventually(g, time.Second*2) + handler2.AssertEventually(g, time.Second*2) -func TestFederatedInformerManagerClusterEventHandlerForNewCluster(t *testing.T) { + // 3. Delete cluster-1 -} + // Get client before deletion + dynamicClient, exists := manager.GetClusterClient("cluster-1") -func TestFederatedInformerManagerClusterEventHandlerOnClusterUpdate(t *testing.T) { + err := fedClient.CoreV1alpha1().FederatedClusters().Delete(ctx, "cluster-1", metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) -} + <-time.After(time.Second) + + // 4. Verify that handler1 and handler2 are unregistered for cluster-1 and no additional events are received + + g.Expect(exists).To(gomega.BeTrue()) -func TestFederatedInformerManagerClusterEventHandlerOnClusterDelete(t *testing.T) { + dp2, err := dynamicClient.Resource(common.DeploymentGVR). + Namespace("default"). + Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + dp2.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + handler1.AssertConsistently(g, time.Second*2) + handler2.AssertConsistently(g, time.Second*2) + + // 5. Verify that handler1 and handler2 are not unregistered for other clusters. + + for _, cluster := range []string{"cluster-2", "cluster-3"} { + dynamicClient, exists := manager.GetClusterClient(cluster) + g.Expect(exists).To(gomega.BeTrue()) + + _, err = dynamicClient.Resource(common.SecretGVR). + Namespace("default"). + Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler1.ExpectAddEvents(secretGVK, 1) + handler2.ExpectAddEvents(secretGVK, 1) + + cm1.SetAnnotations(map[string]string{"test": "test"}) + _, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Update(ctx, cm1, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + handler1.ExpectUpdateEvents(configmapGVK, 1) + handler2.ExpectUpdateEvents(configmapGVK, 1) + } + + handler1.AssertEventually(g, time.Second*2) + handler2.AssertEventually(g, time.Second*2) + }) + t.Run("ClusterEventHandlers should receive correct old and new clusters", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // 1. Bootstrap environment + + var generation int64 = 1 + var callBackCount int64 = 0 + var expectedCallbackCount int64 = 1 + + // assertionCh is used to achieve 2 things: + // 1.
It is used to pass assertions to the main goroutine. + // 2. It is used as an implicit lock to ensure FTC events are not squashed by the InformerManager. + assertionCh := make(chan func()) + + cluster := getTestCluster("cluster-1") + cluster.SetAnnotations(map[string]string{"predicate": "true"}) + cluster.SetGeneration(1) + + clusterHandler := &ClusterEventHandler{ + Predicate: func(oldCluster *fedcorev1a1.FederatedCluster, newCluster *fedcorev1a1.FederatedCluster) bool { + if generation == 1 { + assertionCh <- func() { + g.Expect(oldCluster).To(gomega.BeNil()) + g.Expect(newCluster.GetGeneration()).To(gomega.BeNumerically("==", 1)) + } + } else { + assertionCh <- func() { + g.Expect(oldCluster.GetGeneration()).To(gomega.BeNumerically("==", generation-1)) + g.Expect(newCluster.GetGeneration()).To(gomega.BeNumerically("==", generation)) + } + } + + return newCluster.GetAnnotations()["predicate"] == "true" + }, + Callback: func(cluster *fedcorev1a1.FederatedCluster) { + callBackCount++ + }, + } + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} + defaultObjs := map[string][]*unstructured.Unstructured{} + defaultClusters := []*fedcorev1a1.FederatedCluster{} + generators := []*EventHandlerGenerator{} + clusterHandlers := []*ClusterEventHandler{clusterHandler} + _, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + // 2. Create cluster + + cluster, err := fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, cluster, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + fn := <-assertionCh + fn() + g.Expect(callBackCount).To(gomega.Equal(expectedCallbackCount)) + + // 3. Generate cluster update events + + for i := 0; i < 5; i++ { + generation++ + cluster.SetGeneration(generation) + + if i % 2 == 0 { + cluster.SetAnnotations(map[string]string{"predicate": "false"}) + } else { + cluster.SetAnnotations(map[string]string{"predicate": "true"}) + expectedCallbackCount++ + } + + var err error + cluster, err = fedClient.CoreV1alpha1().FederatedClusters().Update(ctx, cluster, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + fn = <-assertionCh + fn() + g.Expect(callBackCount).To(gomega.Equal(expectedCallbackCount)) + } + }) } func boostrapFederatedInformerManagerWithFakeClients( - clusters []*fedcorev1a1.FederatedCluster, + g *gomega.WithT, + ctx context.Context, ftcs []*fedcorev1a1.FederatedTypeConfig, - objects map[string]*unstructured.Unstructured, -) (FederatedInformerManager, dynamicclient.Interface, fedclient.Interface) { + objects map[string][]*unstructured.Unstructured, + clusters []*fedcorev1a1.FederatedCluster, + eventHandlerGenerators []*EventHandlerGenerator, + clusterEventHandlers []*ClusterEventHandler, +) (FederatedInformerManager, fedclient.Interface) { scheme := runtime.NewScheme() corev1.AddToScheme(scheme) appsv1.AddToScheme(scheme) fedcorev1a1.AddToScheme(scheme) - dynamicObjects := []runtime.Object{} - for _, object := range objects { - dynamicObjects = append(dynamicObjects, runtime.Object(object)) - } - dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects...) 
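[Editor's note: illustrative sketch, not part of the patch. The assertionCh idiom in the ClusterEventHandler subtest above exists because a failed gomega assertion must ultimately call into *testing.T from the test's own goroutine, while the Predicate runs on an informer goroutine; the callback therefore ships its checks back to the test as closures. In miniature, assuming only the g and gomega names from the surrounding test:]

	assertionCh := make(chan func())
	go func() {
		// Stand-in for the informer-driven Predicate/Callback: capture the
		// observed value and defer the assertion instead of asserting here.
		observed := 1
		assertionCh <- func() { g.Expect(observed).To(gomega.Equal(1)) }
	}()
	fn := <-assertionCh
	fn() // runs on the test goroutine, where a failure may safely fail the test

[As the test's own comment notes, the unbuffered channel also acts as an implicit lock that keeps successive events from being squashed before the test has observed them.]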
- fedObjects := []runtime.Object{} for _, cluster := range clusters { fedObjects = append(fedObjects, runtime.Object(cluster)) @@ -339,17 +1228,38 @@ func boostrapFederatedInformerManagerWithFakeClients( ClusterClientGetter{ ConnectionHash: DefaultClusterConnectionHash, ClientGetter: func(cluster *fedcorev1a1.FederatedCluster) (dynamicclient.Interface, error) { - return dynamicfake.NewSimpleDynamicClient(scheme), nil + dynamicObjects := []runtime.Object{} + + clusterObjects := objects[cluster.Name] + if clusterObjects != nil { + for _, object := range clusterObjects { + dynamicObjects = append(dynamicObjects, runtime.Object(object)) + } + } + + return dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects...), nil }, }, factory.Core().V1alpha1().FederatedTypeConfigs(), factory.Core().V1alpha1().FederatedClusters(), ) - // this is required for the factory to start the underlying ftc informer - factory.Core().V1alpha1().FederatedTypeConfigs().Informer() + for _, generator := range eventHandlerGenerators { + informerManager.AddEventHandlerGenerator(generator) + } + for _, handler := range clusterEventHandlers { + informerManager.AddClusterEventHandler(handler) + } + + factory.Start(ctx.Done()) + informerManager.Start(ctx) - factory.Start(context.TODO().Done()) + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), informerManager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } - return informerManager, dynamicClient, fedClient + return informerManager, fedClient } diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index d26e7320..95889e2a 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -33,9 +33,10 @@ func init() { } func TestInformerManager(t *testing.T) { - g := gomega.NewWithT(t) - t.Run("listers for existing FTCs should be available eventually", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -69,6 +70,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("listers for new FTC should be available eventually", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -97,7 +101,7 @@ func TestInformerManager(t *testing.T) { _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 4. Verify the the lister for daemonsets is eventually available + // 4. 
Verify that the lister for daemonsets is eventually available g.Eventually(func(g gomega.Gomega) { lister, informerSynced, exists := manager.GetResourceLister(gvr) @@ -108,6 +112,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("event handlers for existing FTCs should be registered eventually", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -179,6 +186,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("event handlers for new FTCs should be registered eventually", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -207,18 +217,22 @@ func TestInformerManager(t *testing.T) { _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) - // 2. Create new FTC for daemonset + // 2. Verify that alwaysRegistered is not registered initially for daemonset + + alwaysRegistered.AssertConsistently(g, time.Second*2) + + // 3. Create new FTC for daemonset _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, daemonsetFTC, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - // 3. Verify that alwaysRegistered is eventually registered for the new Daemonset FTC + // 4. Verify that alwaysRegistered is eventually registered for the new Daemonset FTC alwaysRegistered.ExpectGenerateEvents(daemonsetFTC.Name, 1) alwaysRegistered.ExpectAddEvents(daemonsetGVK, 4) alwaysRegistered.AssertEventually(g, time.Second*2) - // 4. Verify that newly generated events are also received by alwaysRegistered + // 5. Verify that newly generated events are also received by alwaysRegistered dm1.SetAnnotations(map[string]string{"test": "test"}) _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) @@ -231,7 +245,7 @@ func TestInformerManager(t *testing.T) { alwaysRegistered.AssertEventually(g, time.Second*2) - // 4. Verify that events for non-existent FTCs are not received by alwaysRegistered + // 6. Verify that events for non-existent FTCs are not received by alwaysRegistered _, err = dynamicClient.Resource(common.SecretGVR). Namespace("default"). @@ -239,12 +253,15 @@ func TestInformerManager(t *testing.T) { g.Expect(err).ToNot(gomega.HaveOccurred()) alwaysRegistered.AssertConsistently(g, time.Second*2) - // 5. Verify that unregisteredResourceEventHandler is not registered + // 7. Verify that unregisteredResourceEventHandler is not registered neverRegistered.AssertConsistently(g, time.Second*2) }) t.Run("EventHandlerGenerators should receive correct lastApplied and latest FTCs", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -265,6 +282,7 @@ func TestInformerManager(t *testing.T) { if generation == 1 { assertionCh <- func() { g.Expect(lastApplied).To(gomega.BeNil()) + g.Expect(latest.GetGeneration()).To(gomega.BeNumerically("==", 1)) } } else { assertionCh <- func() { @@ -287,6 +305,7 @@ func TestInformerManager(t *testing.T) { fn() // 3. 
Generate FTC update events + for i := 0; i < 5; i++ { generation++ ftc.SetGeneration(generation) @@ -301,6 +320,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("event handler should be registered on FTC update", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -356,6 +378,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("event handler should be unregistered on FTC update", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -407,6 +432,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("event handler should be re-registered on FTC update", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -452,6 +480,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("event handler should be unchanged on FTC update", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -497,6 +528,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("event handler should be unregisterd on FTC delete", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -585,6 +619,9 @@ func TestInformerManager(t *testing.T) { }) t.Run("event handlers should be unregistered on manager shutdown", func(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -689,13 +726,10 @@ func boostrapInformerManagerWithFakeClients( factory.Start(ctx.Done()) informerManager.Start(ctx) - stopCh := make(chan struct{}) - go func() { - <-time.After(time.Second * 3) - close(stopCh) - }() + ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() - if !cache.WaitForCacheSync(stopCh, informerManager.HasSynced) { + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), informerManager.HasSynced) { g.Fail("Timed out waiting for InformerManager cache sync") } From 9ce179bde5592a3471e95448aad12298c60783a1 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 12:18:38 +0800 Subject: [PATCH 022/173] fix typos --- .../federatedinformermanager.go | 2 +- .../federatedinformermanager_test.go | 36 +++++++++---------- pkg/util/informermanager/interface.go | 2 +- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 9b0336db..61b060a4 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -151,7 +151,7 @@ func (m *federatedInformerManager) processCluster( // This might occur if a cluster was deleted and recreated with different connection details within a short // period of time and we missed processing the deletion. We simply process the cluster deletion and // reenqueue. - // Note: updating of cluster connetion details, however, is still not a supported use case. + // Note: updating of cluster connection details, however, is still not a supported use case. 
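	[Editor's note: illustrative sketch of surrounding code that this hunk does not show; names follow definitions added elsewhere in this series. The deleted-and-recreated case described in the comment above is detectable because each cluster's connection details are reduced to a hash via the ClusterClientGetter's ConnectionHash (DefaultClusterConnectionHash covers the API endpoint, secret name, and service-account flag) and remembered in connectionMap, roughly:]

	oldHash, seen := m.connectionMap[clusterName]
	newHash, err := m.clientGetter.ConnectionHash(cluster)
	if err != nil {
		return err, true // assumed to mirror processCluster's (error, requeue) return shape
	}
	if seen && !bytes.Equal(oldHash, newHash) {
		// Connection details changed: process as a deletion, then reenqueue.
	}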
err := m.processClusterDeletionUnlocked(ctx, clusterName) return err, true } diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index bf1b1ff8..876c466a 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -42,7 +42,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{} - manager, _ := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, _ := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify that the clients for each cluster are eventually available @@ -75,7 +75,7 @@ func TestFederatedInformerManager(t *testing.T) { defaultClusters := []*fedcorev1a1.FederatedCluster{} generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify that client for cluster-1 is not available initially. @@ -117,9 +117,9 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{} - manager, _ := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, _ := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) - // 2. Verify that listers for existing FTCs and clusters are eventually avaiable + // 2.
Verify that listers for existing FTCs and clusters are eventually available for _, ftc := range defaultFTCs { apiresource := ftc.GetSourceType() @@ -172,7 +172,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) ftc := daemonsetFTC apiresource := ftc.GetSourceType() @@ -220,7 +220,7 @@ func TestFederatedInformerManager(t *testing.T) { defaultClusters := []*fedcorev1a1.FederatedCluster{} generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) cluster := getTestCluster("cluster-1") @@ -296,7 +296,7 @@ func TestFederatedInformerManager(t *testing.T) { }, } clusterHandlers := []*ClusterEventHandler{} - manager, _ := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, _ := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify alwaysRegistered is eventually registered for all existing FTCs and clusters. @@ -392,7 +392,7 @@ func TestFederatedInformerManager(t *testing.T) { }, } clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify that alwaysRegistered is not registered initially for daemonset @@ -484,7 +484,7 @@ func TestFederatedInformerManager(t *testing.T) { }, } clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. 
Verify that alwaysRegistered is not registered initially since there are no clusters @@ -602,7 +602,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - _, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + _, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) for range defaultClusters { fn := <-assertionCh @@ -656,7 +656,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify that handler is not registered initially. @@ -728,7 +728,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify that handler is registered initially. @@ -797,7 +797,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify that handler is registered initially @@ -859,7 +859,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify that handler is registered initially @@ -927,7 +927,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator1, generator2} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. 
Verify that handler1 and handler2 are registered initially for all FTCs @@ -1040,7 +1040,7 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator1, generator2} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Verify that handler1 and handler2 are registered initially for all FTCs and clusters @@ -1164,7 +1164,7 @@ func TestFederatedInformerManager(t *testing.T) { defaultClusters := []*fedcorev1a1.FederatedCluster{} generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{clusterHandler} - _, fedClient := boostrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + _, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) // 2. Create cluster @@ -1199,7 +1199,7 @@ func TestFederatedInformerManager(t *testing.T) { }) } -func boostrapFederatedInformerManagerWithFakeClients( +func bootstrapFederatedInformerManagerWithFakeClients( g *gomega.WithT, ctx context.Context, ftcs []*fedcorev1a1.FederatedTypeConfig, diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index dc4ce9c5..0fd1b72d 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -60,7 +60,7 @@ type ClusterEventPredicate func(oldCluster, newCluster *fedcorev1a1.FederatedClu // FederatedInformerManager provides an interface for controllers that need to dynamically register event handlers and // access objects in member clusters based on FederatedTypeConfigs. FederatedInformerManager will listen to FTC events -// and maintian informers for each FTC's source type and joined member cluster. +// and maintain informers for each FTC's source type and joined member cluster. // // Having multiple FTCs with the same source type is not supported and may cause FederatedInformerManager to behave -// incorrectly.
Updating FTC source types is also not supported and may also cause FederatedInformerManager to behave From 65f8ff15bba1f890ba1d89dfe58a2e1a3517b39e Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 13:49:55 +0800 Subject: [PATCH 023/173] fix lint errors --- .../federatedinformermanager_test.go | 15 +++-- .../informermanager/informermanager_test.go | 9 ++- pkg/util/informermanager/testutils_test.go | 58 +++++++------------ 3 files changed, 38 insertions(+), 44 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index 876c466a..a754e0a1 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -1210,9 +1210,12 @@ func bootstrapFederatedInformerManagerWithFakeClients( ) (FederatedInformerManager, fedclient.Interface) { scheme := runtime.NewScheme() - corev1.AddToScheme(scheme) - appsv1.AddToScheme(scheme) - fedcorev1a1.AddToScheme(scheme) + err := corev1.AddToScheme(scheme) + g.Expect(err).ToNot(gomega.HaveOccurred()) + err = appsv1.AddToScheme(scheme) + g.Expect(err).ToNot(gomega.HaveOccurred()) + err = fedcorev1a1.AddToScheme(scheme) + g.Expect(err).ToNot(gomega.HaveOccurred()) fedObjects := []runtime.Object{} for _, cluster := range clusters { @@ -1245,10 +1248,12 @@ func bootstrapFederatedInformerManagerWithFakeClients( ) for _, generator := range eventHandlerGenerators { - informerManager.AddEventHandlerGenerator(generator) + err := informerManager.AddEventHandlerGenerator(generator) + g.Expect(err).ToNot(gomega.HaveOccurred()) } for _, handler := range clusterEventHandlers { - informerManager.AddClusterEventHandler(handler) + err := informerManager.AddClusterEventHandler(handler) + g.Expect(err).ToNot(gomega.HaveOccurred()) } factory.Start(ctx.Done()) diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 95889e2a..dcdc5558 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -700,9 +700,12 @@ func boostrapInformerManagerWithFakeClients( ) (InformerManager, dynamicclient.Interface, fedclient.Interface) { scheme := runtime.NewScheme() - corev1.AddToScheme(scheme) - appsv1.AddToScheme(scheme) - fedcorev1a1.AddToScheme(scheme) + err := corev1.AddToScheme(scheme) + g.Expect(err).ToNot(gomega.HaveOccurred()) + err = appsv1.AddToScheme(scheme) + g.Expect(err).ToNot(gomega.HaveOccurred()) + err = fedcorev1a1.AddToScheme(scheme) + g.Expect(err).ToNot(gomega.HaveOccurred()) dynamicObjects := []runtime.Object{} for _, object := range objects { diff --git a/pkg/util/informermanager/testutils_test.go b/pkg/util/informermanager/testutils_test.go index d9099f4a..dde4c05b 100644 --- a/pkg/util/informermanager/testutils_test.go +++ b/pkg/util/informermanager/testutils_test.go @@ -247,51 +247,37 @@ func (h *countingResourceEventHandler) ExpectDeleteEvents(gvk schema.GroupVersio h.expectedDeleteEventCount[gvk] = h.expectedDeleteEventCount[gvk] + n } -func (h *countingResourceEventHandler) AssertEventually(g gomega.Gomega, timeout time.Duration) { +func (h *countingResourceEventHandler) Assert(g gomega.Gomega) { _, file, no, _ := goruntime.Caller(1) callerInfo := fmt.Sprintf("%s:%d", path.Base(file), no) + for ftc := range h.expectedGenerateCount { + g.Expect(h.generateCount[ftc]). 
+ To(gomega.BeNumerically("==", h.expectedGenerateCount[ftc]), "%s: incorrect number of generate events for %s", callerInfo, ftc) + } + for gvk := range h.expectedAddEventCount { + g.Expect(h.addEventCount[gvk]). + To(gomega.BeNumerically("==", h.expectedAddEventCount[gvk]), "%s: incorrect number of add events for %s", callerInfo, gvk) + } + for gvk := range h.expectedUpdateEventCount { + g.Expect(h.updateEventCount[gvk]). + To(gomega.BeNumerically("==", h.expectedUpdateEventCount[gvk]), "%s: incorrect number of update events for %s", callerInfo, gvk) + } + for gvk := range h.expectedDeleteEventCount { + g.Expect(h.deleteEventCount[gvk]). + To(gomega.BeNumerically("==", h.expectedDeleteEventCount[gvk]), "%s: incorrect number of delete events for %s", callerInfo, gvk) + } +} + +func (h *countingResourceEventHandler) AssertEventually(g gomega.Gomega, timeout time.Duration) { g.Eventually(func(g gomega.Gomega) { - for ftc := range h.expectedGenerateCount { - g.Expect(h.generateCount[ftc]). - To(gomega.BeNumerically("==", h.expectedGenerateCount[ftc]), "%s: incorrect number of generate events for %s", callerInfo, ftc) - } - for gvk := range h.expectedAddEventCount { - g.Expect(h.addEventCount[gvk]). - To(gomega.BeNumerically("==", h.expectedAddEventCount[gvk]), "%s: incorrect number of add events for %s", callerInfo, gvk) - } - for gvk := range h.expectedUpdateEventCount { - g.Expect(h.updateEventCount[gvk]). - To(gomega.BeNumerically("==", h.expectedUpdateEventCount[gvk]), "%s: incorrect number of update events for %s", callerInfo, gvk) - } - for gvk := range h.expectedDeleteEventCount { - g.Expect(h.deleteEventCount[gvk]). - To(gomega.BeNumerically("==", h.expectedDeleteEventCount[gvk]), "%s: incorrect number of delete events for %s", callerInfo, gvk) - } + h.Assert(g) }).WithTimeout(timeout).Should(gomega.Succeed()) } func (h *countingResourceEventHandler) AssertConsistently(g gomega.Gomega, timeout time.Duration) { - _, file, no, _ := goruntime.Caller(1) - callerInfo := fmt.Sprintf("%s:%d", file, no) - g.Consistently(func(g gomega.Gomega) { - for ftc := range h.expectedGenerateCount { - g.Expect(h.generateCount[ftc]). - To(gomega.BeNumerically("==", h.expectedGenerateCount[ftc]), "%s: incorrect number of generate events for %s", callerInfo, ftc) - } - for gvk := range h.expectedAddEventCount { - g.Expect(h.addEventCount[gvk]). - To(gomega.BeNumerically("==", h.expectedAddEventCount[gvk]), "%s: incorrect number of add events for %s", callerInfo, gvk) - } - for gvk := range h.expectedUpdateEventCount { - g.Expect(h.updateEventCount[gvk]). - To(gomega.BeNumerically("==", h.expectedUpdateEventCount[gvk]), "%s: incorrect number of update events for %s", callerInfo, gvk) - } - for gvk := range h.expectedDeleteEventCount { - g.Expect(h.deleteEventCount[gvk]). 
- To(gomega.BeNumerically("==", h.expectedDeleteEventCount[gvk]), "%s: incorrect number of delete events for %s", callerInfo, gvk) - } + h.Assert(g) }).WithTimeout(timeout).Should(gomega.Succeed()) } From 8b2db1936d3726101c835c5e493dd8e91996add8 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 17:18:11 +0800 Subject: [PATCH 024/173] fix lint errors and goroutine leak in tests --- pkg/util/bijection/bijection.go | 16 + .../federatedinformermanager.go | 45 +- .../federatedinformermanager_test.go | 717 +++++++++++------- pkg/util/informermanager/informermanager.go | 35 +- .../informermanager/informermanager_test.go | 437 ++++++----- pkg/util/informermanager/interface.go | 24 +- pkg/util/informermanager/testutils_test.go | 127 +++- 7 files changed, 913 insertions(+), 488 deletions(-) diff --git a/pkg/util/bijection/bijection.go b/pkg/util/bijection/bijection.go index 71efb696..41589519 100644 --- a/pkg/util/bijection/bijection.go +++ b/pkg/util/bijection/bijection.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package bijection import ( diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 61b060a4..3e5cbf99 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package informermanager import ( @@ -24,14 +40,15 @@ import ( type federatedInformerManager struct { lock sync.RWMutex - started bool + started bool + shutdown bool clientGetter ClusterClientGetter ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer clusterInformer fedcorev1a1informers.FederatedClusterInformer eventHandlerGenerators []*EventHandlerGenerator - clusterEventHandlers []*ClusterEventHandler + clusterEventHandlers []*ClusterEventHandler clients map[string]dynamic.Interface connectionMap map[string][]byte @@ -49,11 +66,12 @@ func NewFederatedInformerManager( manager := &federatedInformerManager{ lock: sync.RWMutex{}, started: false, + shutdown: false, clientGetter: clientGetter, ftcInformer: ftcInformer, clusterInformer: clusterInformer, eventHandlerGenerators: []*EventHandlerGenerator{}, - clusterEventHandlers: []*ClusterEventHandler{}, + clusterEventHandlers: []*ClusterEventHandler{}, clients: map[string]dynamic.Interface{}, connectionMap: map[string][]byte{}, informerManagers: map[string]InformerManager{}, @@ -186,7 +204,7 @@ func (m *federatedInformerManager) processCluster( func (m *federatedInformerManager) processClusterDeletion(ctx context.Context, clusterName string) error { m.lock.Lock() - m.lock.Unlock() + defer m.lock.Unlock() return m.processClusterDeletionUnlocked(ctx, clusterName) } @@ -209,12 +227,11 @@ func (m *federatedInformerManager) AddClusterEventHandler(handler *ClusterEventH defer m.lock.Unlock() if m.started { - return fmt.Errorf("FederatedInformerManager is already started.") + return fmt.Errorf("failed to add ClusterEventHandler: FederatedInformerManager is already started") } m.clusterEventHandlers = append(m.clusterEventHandlers, handler) return nil - } func (m *federatedInformerManager) AddEventHandlerGenerator(generator *EventHandlerGenerator) error { @@ -222,7 +239,7 @@ func (m *federatedInformerManager) AddEventHandlerGenerator(generator *EventHand defer m.lock.Unlock() if m.started { - return fmt.Errorf("FederatedInformerManager is already started.") + return fmt.Errorf("failed to add EventHandlerGenerator: FederatedInformerManager is already started") } m.eventHandlerGenerators = append(m.eventHandlerGenerators, generator) @@ -313,20 +330,30 @@ func (m *federatedInformerManager) Start(ctx context.Context) { go func() { <-ctx.Done() + m.lock.Lock() + defer m.lock.Unlock() + logger.V(2).Info("Stopping FederatedInformerManager") m.queue.ShutDown() + m.shutdown = true }() } +func (m *federatedInformerManager) IsShutdown() bool { + m.lock.RLock() + defer m.lock.RUnlock() + return m.shutdown +} + var _ FederatedInformerManager = &federatedInformerManager{} func DefaultClusterConnectionHash(cluster *fedcorev1a1.FederatedCluster) ([]byte, error) { hashObj := struct { - ApiEndpoint string + APIEndpoint string SecretName string UseServiceAccountToken bool }{ - ApiEndpoint: cluster.Spec.APIEndpoint, + APIEndpoint: cluster.Spec.APIEndpoint, SecretName: cluster.Spec.SecretRef.Name, UseServiceAccountToken: cluster.Spec.UseServiceAccountToken, } diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index a754e0a1..2b10333a 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package informermanager import ( @@ -5,14 +21,18 @@ import ( "testing" "time" + "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" dynamicclient "k8s.io/client-go/dynamic" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" @@ -20,16 +40,16 @@ import ( fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - "github.com/onsi/gomega" ) +//nolint:gocyclo func TestFederatedInformerManager(t *testing.T) { + t.Parallel() + ctx := klog.NewContext(context.Background(), ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(2)))) + t.Run("clients for existing clusters should be available eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -42,7 +62,21 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{} - manager, _ := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, _ := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that the clients for each cluster are eventually available @@ -63,10 +97,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("clients for new clusters should be available eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -75,7 +106,21 @@ func TestFederatedInformerManager(t *testing.T) { defaultClusters := []*fedcorev1a1.FederatedCluster{} generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2.
Verify that client for cluster-1 is not available initially. @@ -87,7 +132,11 @@ func TestFederatedInformerManager(t *testing.T) { // 3. Create a new cluster - cluster, err := fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, getTestCluster("cluster-1"), metav1.CreateOptions{}) + cluster, err := fedClient.CoreV1alpha1().FederatedClusters().Create( + ctx, + getTestCluster("cluster-1"), + metav1.CreateOptions{}, + ) g.Expect(err).ToNot(gomega.HaveOccurred()) // 4. Verify that client for new cluster is eventually available @@ -101,10 +150,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("listers for existing FTCs and clusters should be available eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1.
Bootstrap environment @@ -220,7 +288,21 @@ func TestFederatedInformerManager(t *testing.T) { defaultClusters := []*fedcorev1a1.FederatedCluster{} generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() cluster := getTestCluster("cluster-1") @@ -258,12 +340,9 @@ func TestFederatedInformerManager(t *testing.T) { }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) }) - t.Run("event handlers for existing FTCs and clusters should be registed eventually", func(t *testing.T) { + t.Run("event handlers for existing FTCs and clusters should be registered eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -296,17 +375,33 @@ func TestFederatedInformerManager(t *testing.T) { }, } clusterHandlers := []*ClusterEventHandler{} - manager, _ := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, _ := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify alwaysRegistered is eventually registered for all existing FTCs and clusters. - for range defaultClusters { - alwaysRegistered.ExpectGenerateEvents(deploymentFTC.Name, 1) - alwaysRegistered.ExpectGenerateEvents(configmapFTC.Name, 1) - alwaysRegistered.ExpectGenerateEvents(secretFTC.Name, 1) - alwaysRegistered.ExpectAddEvents(deploymentGVK, 1) - alwaysRegistered.ExpectAddEvents(configmapGVK, 1) - alwaysRegistered.ExpectAddEvents(secretGVK, 1) + for _, cluster := range defaultClusters { + for _, ftc := range defaultFTCs { + alwaysRegistered.ExpectGenerateEvents(ftc.Name, 1) + } + + for _, obj := range defaultObjs[cluster.Name] { + gvk := mustParseObject(obj) + alwaysRegistered.ExpectAddEvents(gvk, 1) + } } alwaysRegistered.AssertEventually(g, time.Second*2) @@ -318,47 +413,57 @@ func TestFederatedInformerManager(t *testing.T) { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - _, err := dynamicClient.Resource(common.SecretGVR). - Namespace("default"). 
- Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectAddEvents(secretGVK, 1) - - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectUpdateEvents(deploymentGVK, 1) - - err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectDeleteEvents(configmapGVK, 1) + generateEvents( + ctx, + g, + getTestSecret("sc-2", "default"), + dynamicClient.Resource(common.SecretGVR), + alwaysRegistered, + ) + + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + alwaysRegistered, + ) + + generateEvents( + ctx, + g, + getTestConfigMap("cm-2", "default"), + dynamicClient.Resource(common.ConfigMapGVR), + alwaysRegistered, + ) } - alwaysRegistered.AssertEventually(g, time.Second*1) + alwaysRegistered.AssertEventually(g, time.Second*2) // 5. Verify that events for non-existent FTCs are not received for _, cluster := range defaultClusters { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) + + generateEvents( + ctx, + g, + getTestDaemonSet("dm-1", "default"), + dynamicClient.Resource(common.DaemonSetGVR), + ) } alwaysRegistered.AssertConsistently(g, time.Second*2) - // 5. Verify neverRegsitered receives no events + // 5. Verify neverRegistered receives no events neverRegistered.AssertConsistently(g, time.Second*2) }) t.Run("event handlers for new FTCs should be registered eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -392,7 +497,22 @@ func TestFederatedInformerManager(t *testing.T) { }, } clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that alwaysRegistered is not registered initially for daemonset @@ -414,20 +534,17 @@ func TestFederatedInformerManager(t *testing.T) { // 5.
Verify that newly generated events are also received by alwaysRegistered - dm1.SetAnnotations(map[string]string{"test": "test"}) - for _, cluster := range defaultClusters { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectUpdateEvents(daemonsetGVK, 1) - - g.Expect(exists).To(gomega.BeTrue()) - err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectDeleteEvents(daemonsetGVK, 1) + generateEvents( + ctx, + g, + getTestDaemonSet("dm-5", "default"), + dynamicClient.Resource(common.DaemonSetGVR), + alwaysRegistered, + ) } alwaysRegistered.AssertEventually(g, time.Second*2) @@ -437,11 +554,15 @@ func TestFederatedInformerManager(t *testing.T) { for _, cluster := range defaultClusters { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.SecretGVR). - Namespace("default"). - Create(ctx, getTestSecret("sc-1", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + + generateEvents( + ctx, + g, + getTestSecret("sc-1", "default"), + dynamicClient.Resource(common.SecretGVR), + ) } + alwaysRegistered.AssertConsistently(g, time.Second*2) // 7. Verify that unregisteredResourceEventHandler is not registered @@ -451,10 +572,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("event handlers for new clusters should be registered eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -484,7 +602,21 @@ func TestFederatedInformerManager(t *testing.T) { }, } clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that alwaysRegistered is not registered initially since there are no clusters @@ -492,13 +624,25 @@ func TestFederatedInformerManager(t *testing.T) { // 3. 
Create new clusters - _, err := fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, getTestCluster("cluster-1"), metav1.CreateOptions{}) + _, err := fedClient.CoreV1alpha1().FederatedClusters().Create( + ctx, + getTestCluster("cluster-1"), + metav1.CreateOptions{}, + ) g.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, getTestCluster("cluster-2"), metav1.CreateOptions{}) + _, err = fedClient.CoreV1alpha1().FederatedClusters().Create( + ctx, + getTestCluster("cluster-2"), + metav1.CreateOptions{}, + ) g.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, getTestCluster("cluster-3"), metav1.CreateOptions{}) + _, err = fedClient.CoreV1alpha1().FederatedClusters().Create( + ctx, + getTestCluster("cluster-3"), + metav1.CreateOptions{}, + ) g.Expect(err).ToNot(gomega.HaveOccurred()) // 4. Verify that alwaysRegistered is eventually registered for the new Daemonset FTC @@ -512,27 +656,17 @@ func TestFederatedInformerManager(t *testing.T) { // 5. Verify that newly generated events are also received by alwaysRegistered - dm1.SetAnnotations(map[string]string{"test": "test"}) - for _, cluster := range defaultClusters { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectUpdateEvents(daemonsetGVK, 1) - - g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.DaemonSetGVR). - Namespace("default"). - Create(ctx, getTestDaemonSet("dm-5", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectAddEvents(daemonsetGVK, 1) - - g.Expect(exists).To(gomega.BeTrue()) - err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectDeleteEvents(daemonsetGVK, 1) + generateEvents( + ctx, + g, + getTestDaemonSet("dm-5", "default"), + dynamicClient.Resource(common.DaemonSetGVR), + alwaysRegistered, + ) } alwaysRegistered.AssertEventually(g, time.Second*2) @@ -543,10 +677,12 @@ func TestFederatedInformerManager(t *testing.T) { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.SecretGVR). - Namespace("default"). - Create(ctx, getTestSecret("sc-1", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestSecret("sc-1", "default"), + dynamicClient.Resource(common.SecretGVR), + ) } alwaysRegistered.AssertConsistently(g, time.Second*2) @@ -558,10 +694,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("event handler should receive correct lastApplied and latest FTCs", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. 
Bootstrap environment @@ -602,7 +735,21 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - _, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() for range defaultClusters { fn := <-assertionCh @@ -628,17 +775,14 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("event handler should be registered on FTC update", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment dp1 := getTestDeployment("dp-1", "default") ftc := deploymentFTC.DeepCopy() - ftc.SetAnnotations(map[string]string{"predicate": "false", "generator": "true"}) + ftc.SetAnnotations(map[string]string{"predicate": predicateFalse, "generator": predicateTrue}) handler := newCountingResourceEventHandler() generator := newAnnotationBasedGenerator(handler) @@ -656,7 +800,21 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler is not registered initially. @@ -664,7 +822,7 @@ func TestFederatedInformerManager(t *testing.T) { // 3. Update FTC to trigger registration - ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) + ftc.SetAnnotations(map[string]string{"predicate": predicateTrue, "generator": predicateTrue}) ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -679,20 +837,13 @@ func TestFederatedInformerManager(t *testing.T) { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - dp2, err := dynamicClient.Resource(common.DeploymentGVR). - Namespace("default"). 
- Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectAddEvents(deploymentGVK, 1) - - dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectUpdateEvents(deploymentGVK, 1) - - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectDeleteEvents(deploymentGVK, 1) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + handler, + ) } handler.AssertEventually(g, time.Second*2) @@ -700,17 +851,14 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("event handler should be unregistered on FTC update", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment dp1 := getTestDeployment("dp-1", "default") ftc := deploymentFTC.DeepCopy() - ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) + ftc.SetAnnotations(map[string]string{"predicate": predicateTrue, "generator": predicateTrue}) handler := newCountingResourceEventHandler() generator := newAnnotationBasedGenerator(handler) @@ -728,7 +876,21 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler is registered initially. @@ -738,8 +900,8 @@ func TestFederatedInformerManager(t *testing.T) { // 3. Update FTC to trigger unregistration - ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "false"}) - ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + ftc.SetAnnotations(map[string]string{"predicate": predicateTrue, "generator": predicateFalse}) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) <-time.After(time.Second) @@ -750,17 +912,12 @@ func TestFederatedInformerManager(t *testing.T) { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - dp2, err := dynamicClient.Resource(common.DeploymentGVR). - Namespace("default"). 
- Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - dp2.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + ) } handler.AssertConsistently(g, time.Second*2) @@ -768,10 +925,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("event handler should be re-registered on FTC update", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -797,7 +951,21 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler is registered initially @@ -820,9 +988,14 @@ func TestFederatedInformerManager(t *testing.T) { for _, cluster := range defaultClusters { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectUpdateEvents(deploymentGVK, 1) + + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + handler, + ) } handler.AssertEventually(g, time.Second*2) @@ -830,10 +1003,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("event handler should remain unchanged on FTC update", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -859,7 +1029,21 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. 
Verify that handler is registered initially @@ -882,9 +1066,14 @@ func TestFederatedInformerManager(t *testing.T) { for _, cluster := range defaultClusters { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectUpdateEvents(deploymentGVK, 1) + + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + handler, + ) } handler.AssertEventually(g, time.Second*2) @@ -892,10 +1081,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("event handler should be unregistered on FTC delete", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -927,24 +1113,35 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator1, generator2} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) - // 2. Verify that handler1 and handler2 is registered initially for all FTCs + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() - for range defaultClusters { - handler1.ExpectGenerateEvents(deploymentFTC.Name, 1) - handler1.ExpectGenerateEvents(configmapFTC.Name, 1) - handler1.ExpectGenerateEvents(secretFTC.Name, 1) - handler1.ExpectAddEvents(deploymentGVK, 1) - handler1.ExpectAddEvents(configmapGVK, 1) - handler1.ExpectAddEvents(secretGVK, 1) - - handler2.ExpectGenerateEvents(deploymentFTC.Name, 1) - handler2.ExpectGenerateEvents(configmapFTC.Name, 1) - handler2.ExpectGenerateEvents(secretFTC.Name, 1) - handler2.ExpectAddEvents(deploymentGVK, 1) - handler2.ExpectAddEvents(configmapGVK, 1) - handler2.ExpectAddEvents(secretGVK, 1) + // 2. Verify that handler1 and handler2 is registered initially for all FTCs and clusters + + for _, cluster := range defaultClusters { + for _, ftc := range defaultFTCs { + handler1.ExpectGenerateEvents(ftc.Name, 1) + handler2.ExpectGenerateEvents(ftc.Name, 1) + } + + for _, obj := range defaultObjs[cluster.Name] { + gvk := mustParseObject(obj) + handler1.ExpectAddEvents(gvk, 1) + handler2.ExpectAddEvents(gvk, 1) + } } handler1.AssertEventually(g, time.Second*2) @@ -952,7 +1149,11 @@ func TestFederatedInformerManager(t *testing.T) { // 3. Delete the deployment FTC - err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Delete(ctx, deploymentFTC.GetName(), metav1.DeleteOptions{}) + err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Delete( + ctx, + deploymentFTC.GetName(), + metav1.DeleteOptions{}, + ) g.Expect(err).ToNot(gomega.HaveOccurred()) <-time.After(time.Second) @@ -963,17 +1164,12 @@ func TestFederatedInformerManager(t *testing.T) { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - dp2, err := dynamicClient.Resource(common.DeploymentGVR). - Namespace("default"). 
- Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - dp2.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + ) } handler1.AssertConsistently(g, time.Second*2) @@ -985,18 +1181,23 @@ func TestFederatedInformerManager(t *testing.T) { dynamicClient, exists := manager.GetClusterClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.SecretGVR). - Namespace("default"). - Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler1.ExpectAddEvents(secretGVK, 1) - handler2.ExpectAddEvents(secretGVK, 1) - - cm1.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Update(ctx, cm1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler1.ExpectUpdateEvents(configmapGVK, 1) - handler2.ExpectUpdateEvents(configmapGVK, 1) + generateEvents( + ctx, + g, + getTestSecret("sc-2", "default"), + dynamicClient.Resource(common.SecretGVR), + handler1, + handler2, + ) + + generateEvents( + ctx, + g, + getTestConfigMap("cm-2", "default"), + dynamicClient.Resource(common.ConfigMapGVR), + handler1, + handler2, + ) } handler1.AssertEventually(g, time.Second*2) @@ -1005,10 +1206,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("event handler should be unregistered on cluster delete", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -1040,24 +1238,35 @@ func TestFederatedInformerManager(t *testing.T) { } generators := []*EventHandlerGenerator{generator1, generator2} clusterHandlers := []*ClusterEventHandler{} - manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. 
Verify that handler1 and handler2 is registered initially for all FTCs and clusters - for range defaultClusters { - handler1.ExpectGenerateEvents(deploymentFTC.Name, 1) - handler1.ExpectGenerateEvents(configmapFTC.Name, 1) - handler1.ExpectGenerateEvents(secretFTC.Name, 1) - handler1.ExpectAddEvents(deploymentGVK, 1) - handler1.ExpectAddEvents(configmapGVK, 1) - handler1.ExpectAddEvents(secretGVK, 1) - - handler2.ExpectGenerateEvents(deploymentFTC.Name, 1) - handler2.ExpectGenerateEvents(configmapFTC.Name, 1) - handler2.ExpectGenerateEvents(secretFTC.Name, 1) - handler2.ExpectAddEvents(deploymentGVK, 1) - handler2.ExpectAddEvents(configmapGVK, 1) - handler2.ExpectAddEvents(secretGVK, 1) + for _, cluster := range defaultClusters { + for _, ftc := range defaultFTCs { + handler1.ExpectGenerateEvents(ftc.Name, 1) + handler2.ExpectGenerateEvents(ftc.Name, 1) + } + + for _, obj := range defaultObjs[cluster.Name] { + gvk := mustParseObject(obj) + handler1.ExpectAddEvents(gvk, 1) + handler2.ExpectAddEvents(gvk, 1) + } } handler1.AssertEventually(g, time.Second*2) @@ -1077,17 +1286,12 @@ func TestFederatedInformerManager(t *testing.T) { g.Expect(exists).To(gomega.BeTrue()) - dp2, err := dynamicClient.Resource(common.DeploymentGVR). - Namespace("default"). - Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - dp2.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + ) handler1.AssertConsistently(g, time.Second*2) handler2.AssertConsistently(g, time.Second*2) @@ -1098,18 +1302,14 @@ func TestFederatedInformerManager(t *testing.T) { dynamicClient, exists := manager.GetClusterClient(cluster) g.Expect(exists).To(gomega.BeTrue()) - _, err = dynamicClient.Resource(common.SecretGVR). - Namespace("default"). - Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler1.ExpectAddEvents(secretGVK, 1) - handler2.ExpectAddEvents(secretGVK, 1) - - cm1.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Update(ctx, cm1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler1.ExpectUpdateEvents(configmapGVK, 1) - handler2.ExpectUpdateEvents(configmapGVK, 1) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + handler1, + handler2, + ) } handler1.AssertEventually(g, time.Second*2) @@ -1118,10 +1318,7 @@ func TestFederatedInformerManager(t *testing.T) { t.Run("ClusterEventHandlers should receive correct old and new clusters", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. 
Bootstrap environment @@ -1135,7 +1332,7 @@ func TestFederatedInformerManager(t *testing.T) { assertionCh := make(chan func()) cluster := getTestCluster("cluster-1") - cluster.SetAnnotations(map[string]string{"predicate": "true"}) + cluster.SetAnnotations(map[string]string{"predicate": predicateTrue}) cluster.SetGeneration(1) clusterHandler := &ClusterEventHandler{ @@ -1152,7 +1349,7 @@ func TestFederatedInformerManager(t *testing.T) { } } - return newCluster.GetAnnotations()["predicate"] == "true" + return newCluster.GetAnnotations()["predicate"] == predicateTrue }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { callBackCount++ @@ -1164,8 +1361,22 @@ func TestFederatedInformerManager(t *testing.T) { defaultClusters := []*fedcorev1a1.FederatedCluster{} generators := []*EventHandlerGenerator{} clusterHandlers := []*ClusterEventHandler{clusterHandler} - _, fedClient := bootstrapFederatedInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, defaultClusters, generators, clusterHandlers) - + + ctx, cancel := context.WithCancel(ctx) + manager, fedClient := bootstrapFederatedInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + defaultClusters, + generators, + clusterHandlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() + // 2. Create cluster cluster, err := fedClient.CoreV1alpha1().FederatedClusters().Create(ctx, cluster, metav1.CreateOptions{}) @@ -1181,10 +1392,10 @@ func TestFederatedInformerManager(t *testing.T) { generation++ cluster.SetGeneration(generation) - if i % 2 == 0 { - cluster.SetAnnotations(map[string]string{"predicate": "false"}) + if i%2 == 0 { + cluster.SetAnnotations(map[string]string{"predicate": predicateFalse}) } else { - cluster.SetAnnotations(map[string]string{"predicate": "true"}) + cluster.SetAnnotations(map[string]string{"predicate": predicateTrue}) expectedCallbackCount++ } @@ -1234,10 +1445,8 @@ func bootstrapFederatedInformerManagerWithFakeClients( dynamicObjects := []runtime.Object{} clusterObjects := objects[cluster.Name] - if clusterObjects != nil { - for _, object := range clusterObjects { - dynamicObjects = append(dynamicObjects, runtime.Object(object)) - } + for _, object := range clusterObjects { + dynamicObjects = append(dynamicObjects, runtime.Object(object)) } return dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects...), nil diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index be631d35..9c5afe37 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package informermanager import ( @@ -18,7 +34,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" "github.com/kubewharf/kubeadmiral/pkg/util/bijection" ) @@ -26,7 +42,8 @@ import ( type informerManager struct { lock sync.RWMutex - started bool + started bool + shutdown bool client dynamic.Interface ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer @@ -239,14 +256,14 @@ func (m *informerManager) AddEventHandlerGenerator(generator *EventHandlerGenera defer m.lock.Unlock() if m.started { - return fmt.Errorf("InformerManager is already started.") + return fmt.Errorf("failed to add EventHandlerGenerator: InformerManager is already started") } m.eventHandlerGenerators = append(m.eventHandlerGenerators, generator) return nil } -func (m *informerManager) GetFederatedTypeConfigLister() v1alpha1.FederatedTypeConfigLister { +func (m *informerManager) GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister { return m.ftcInformer.Lister() } @@ -298,9 +315,19 @@ func (m *informerManager) Start(ctx context.Context) { go func() { <-ctx.Done() + m.lock.Lock() + defer m.lock.Unlock() + logger.V(2).Info("Stopping InformerManager") m.queue.ShutDown() + m.shutdown = true }() } +func (m *informerManager) IsShutdown() bool { + m.lock.RLock() + defer m.lock.RUnlock() + return m.shutdown +} + var _ InformerManager = &informerManager{} diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index dcdc5558..17ac4650 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -1,8 +1,23 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package informermanager import ( "context" - "flag" "testing" "time" @@ -12,11 +27,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - + "k8s.io/apimachinery/pkg/util/wait" dynamicclient "k8s.io/client-go/dynamic" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" @@ -26,26 +42,26 @@ import ( schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) -func init() { - flags := flag.NewFlagSet("", flag.ContinueOnError) - klog.InitFlags(flags) - flags.Set("v", "2") -} - func TestInformerManager(t *testing.T) { + t.Parallel() + ctx := klog.NewContext(context.Background(), ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(2)))) + t.Run("listers for existing FTCs should be available eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} defaultObjs := []*unstructured.Unstructured{} generators := []*EventHandlerGenerator{} - manager, _, _ := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + + ctx, cancel := context.WithCancel(ctx) + manager, _, _ := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that the listers for each FTC is eventually available @@ -71,17 +87,20 @@ func TestInformerManager(t *testing.T) { t.Run("listers for new FTC should be available eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} defaultObjs := []*unstructured.Unstructured{} generators := []*EventHandlerGenerator{} - manager, _, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + + ctx, cancel := context.WithCancel(ctx) + manager, _, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() ftc := daemonsetFTC apiresource := ftc.GetSourceType() @@ -113,10 +132,7 @@ func TestInformerManager(t *testing.T) { t.Run("event handlers for existing FTCs should be registered eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -140,57 +156,73 @@ func TestInformerManager(t *testing.T) { }, } - _, dynamicClient, _ := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + ctx, cancel := context.WithCancel(ctx) + manager, dynamicClient, _ := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. 
Verify alwaysRegistered is eventually registered for all existing FTCs. - alwaysRegistered.ExpectGenerateEvents(deploymentFTC.Name, 1) - alwaysRegistered.ExpectGenerateEvents(configmapFTC.Name, 1) - alwaysRegistered.ExpectGenerateEvents(secretFTC.Name, 1) - alwaysRegistered.ExpectAddEvents(deploymentGVK, 1) - alwaysRegistered.ExpectAddEvents(configmapGVK, 1) - alwaysRegistered.ExpectAddEvents(secretGVK, 1) - alwaysRegistered.AssertEventually(g, time.Second*2) + for _, ftc := range defaultFTCs { + alwaysRegistered.ExpectGenerateEvents(ftc.Name, 1) + } - // 3. Verify newly generated events are received by alwaysRegistered + for _, obj := range defaultObjs { + gvk := mustParseObject(obj) + alwaysRegistered.ExpectAddEvents(gvk, 1) + } - _, err := dynamicClient.Resource(common.SecretGVR). - Namespace("default"). - Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectAddEvents(secretGVK, 1) + alwaysRegistered.AssertEventually(g, time.Second*2) - dp1.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectUpdateEvents(deploymentGVK, 1) + // 3. Verify newly generated events are received by alwaysRegistered - err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectDeleteEvents(configmapGVK, 1) + generateEvents( + ctx, + g, + getTestSecret("sc-2", "default"), + dynamicClient.Resource(common.SecretGVR), + alwaysRegistered, + ) + + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + alwaysRegistered, + ) + + generateEvents( + ctx, + g, + getTestConfigMap("cm-2", "default"), + dynamicClient.Resource(common.ConfigMapGVR), + alwaysRegistered, + ) alwaysRegistered.AssertEventually(g, time.Second*2) // 4. Verify that events for non-existent FTCs are not received - _, err = dynamicClient.Resource(common.DaemonSetGVR). - Namespace("default"). - Create(ctx, getTestDaemonSet("dm-1", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDaemonSet("dm-1", "default"), + dynamicClient.Resource(common.DaemonSetGVR), + ) alwaysRegistered.AssertConsistently(g, time.Second*2) - // 5. Verify neverRegsitered receives no events + // 5. Verify neverRegistered receives no events neverRegistered.AssertConsistently(g, time.Second*2) }) t.Run("event handlers for new FTCs should be registered eventually", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -215,7 +247,12 @@ func TestInformerManager(t *testing.T) { }, } - _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + ctx, cancel := context.WithCancel(ctx) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that alwaysRegistered is not registered initially for daemonset @@ -234,23 +271,25 @@ func TestInformerManager(t *testing.T) { // 5. 
Verify that newly generated events are also received by alwaysRegistered - dm1.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Update(ctx, dm1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectUpdateEvents(daemonsetGVK, 1) - - err = dynamicClient.Resource(common.DaemonSetGVR).Namespace("default").Delete(ctx, dm4.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - alwaysRegistered.ExpectDeleteEvents(daemonsetGVK, 1) + generateEvents( + ctx, + g, + getTestDaemonSet("dm-5", "default"), + dynamicClient.Resource(common.DaemonSetGVR), + alwaysRegistered, + ) alwaysRegistered.AssertEventually(g, time.Second*2) // 6. Verify that events for non-existent FTCs are not received by alwaysRegistered - _, err = dynamicClient.Resource(common.SecretGVR). - Namespace("default"). - Create(ctx, getTestSecret("sc-1", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestSecret("sc-1", "default"), + dynamicClient.Resource(common.SecretGVR), + ) + alwaysRegistered.AssertConsistently(g, time.Second*2) // 7. Verify that unregisteredResourceEventHandler is not registered @@ -260,10 +299,7 @@ func TestInformerManager(t *testing.T) { t.Run("EventHandlerGenerators should receive correct lastApplied and latest FTCs", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -299,7 +335,13 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} defaultObjs := []*unstructured.Unstructured{} generators := []*EventHandlerGenerator{generator} - _, _, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + + ctx, cancel := context.WithCancel(ctx) + manager, _, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() fn := <-assertionCh fn() @@ -321,17 +363,14 @@ func TestInformerManager(t *testing.T) { t.Run("event handler should be registered on FTC update", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment dp1 := getTestDeployment("dp-1", "default") ftc := deploymentFTC.DeepCopy() - ftc.SetAnnotations(map[string]string{"predicate": "false", "generator": "true"}) + ftc.SetAnnotations(map[string]string{"predicate": predicateFalse, "generator": predicateTrue}) handler := newCountingResourceEventHandler() generator := newAnnotationBasedGenerator(handler) @@ -340,7 +379,12 @@ func TestInformerManager(t *testing.T) { defaultObjs := []*unstructured.Unstructured{dp1} generators := []*EventHandlerGenerator{generator} - _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + ctx, cancel := context.WithCancel(ctx) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler is not registered initially. 
@@ -348,7 +392,7 @@ func TestInformerManager(t *testing.T) { // 3. Update FTC to trigger registration - ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) + ftc.SetAnnotations(map[string]string{"predicate": predicateTrue, "generator": predicateTrue}) ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -359,37 +403,27 @@ func TestInformerManager(t *testing.T) { handler.AssertEventually(g, time.Second*2) - dp2, err := dynamicClient.Resource(common.DeploymentGVR). - Namespace("default"). - Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectAddEvents(deploymentGVK, 1) - - dp2.SetAnnotations(map[string]string{"test-annotation": "test-value"}) - dp2, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectUpdateEvents(deploymentGVK, 1) - - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler.ExpectDeleteEvents(deploymentGVK, 1) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + handler, + ) handler.AssertEventually(g, time.Second*2) }) t.Run("event handler should be unregistered on FTC update", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment dp1 := getTestDeployment("dp-1", "default") ftc := deploymentFTC.DeepCopy() - ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "true"}) + ftc.SetAnnotations(map[string]string{"predicate": predicateTrue, "generator": predicateTrue}) handler := newCountingResourceEventHandler() generator := newAnnotationBasedGenerator(handler) @@ -398,7 +432,12 @@ func TestInformerManager(t *testing.T) { defaultObjs := []*unstructured.Unstructured{dp1} generators := []*EventHandlerGenerator{generator} - _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + ctx, cancel := context.WithCancel(ctx) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler is registered initially. @@ -408,35 +447,27 @@ func TestInformerManager(t *testing.T) { // 3. Update FTC to trigger unregistration - ftc.SetAnnotations(map[string]string{"predicate": "true", "generator": "false"}) - ftc, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + ftc.SetAnnotations(map[string]string{"predicate": predicateTrue, "generator": predicateFalse}) + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) <-time.After(time.Second) // 4. Verify that handler is unregistered and new events are no longer received by handler. - dp2, err := dynamicClient.Resource(common.DeploymentGVR). - Namespace("default"). 
- Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - dp2.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + ) handler.AssertConsistently(g, time.Second*2) }) t.Run("event handler should be re-registered on FTC update", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -453,7 +484,12 @@ func TestInformerManager(t *testing.T) { defaultObjs := []*unstructured.Unstructured{dp1} generators := []*EventHandlerGenerator{generator} - _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + ctx, cancel := context.WithCancel(ctx) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler is registered initially @@ -471,20 +507,20 @@ func TestInformerManager(t *testing.T) { handler.ExpectAddEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) - dp1.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + handler, + ) - handler.ExpectUpdateEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) }) t.Run("event handler should be unchanged on FTC update", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -501,7 +537,12 @@ func TestInformerManager(t *testing.T) { defaultObjs := []*unstructured.Unstructured{dp1} generators := []*EventHandlerGenerator{generator} - _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + ctx, cancel := context.WithCancel(ctx) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler is registered initially @@ -519,20 +560,20 @@ func TestInformerManager(t *testing.T) { // 4. 
Verify events are still received by handler - dp1.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + handler, + ) - handler.ExpectUpdateEvents(deploymentGVK, 1) handler.AssertEventually(g, time.Second*2) }) - t.Run("event handler should be unregisterd on FTC delete", func(t *testing.T) { + t.Run("event handler should be unregistered on FTC delete", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -555,24 +596,27 @@ func TestInformerManager(t *testing.T) { defaultObjs := []*unstructured.Unstructured{dp1, cm1, sc1} generators := []*EventHandlerGenerator{generator1, generator2} - _, dynamicClient, fedClient := boostrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + ctx, cancel := context.WithCancel(ctx) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler1 and handler2 is registered initially for all FTCs - handler1.ExpectGenerateEvents(deploymentFTC.Name, 1) - handler1.ExpectGenerateEvents(configmapFTC.Name, 1) - handler1.ExpectGenerateEvents(secretFTC.Name, 1) - handler1.ExpectAddEvents(deploymentGVK, 1) - handler1.ExpectAddEvents(configmapGVK, 1) - handler1.ExpectAddEvents(secretGVK, 1) - handler1.AssertEventually(g, time.Second*2) + for _, ftc := range defaultFTCs { + handler1.ExpectGenerateEvents(ftc.Name, 1) + handler2.ExpectGenerateEvents(ftc.Name, 1) + } - handler2.ExpectGenerateEvents(deploymentFTC.Name, 1) - handler2.ExpectGenerateEvents(configmapFTC.Name, 1) - handler2.ExpectGenerateEvents(secretFTC.Name, 1) - handler2.ExpectAddEvents(deploymentGVK, 1) - handler2.ExpectAddEvents(configmapGVK, 1) - handler2.ExpectAddEvents(secretGVK, 1) + for _, obj := range defaultObjs { + gvk := mustParseObject(obj) + handler1.ExpectAddEvents(gvk, 1) + handler2.ExpectAddEvents(gvk, 1) + } + + handler1.AssertEventually(g, time.Second*2) handler2.AssertEventually(g, time.Second*2) // 3. Delete the deployment FTC @@ -584,35 +628,35 @@ func TestInformerManager(t *testing.T) { // 4. Verify that handler1 and handler2 is unregistered for deployments and no additional events are received - dp2, err := dynamicClient.Resource(common.DeploymentGVR). - Namespace("default"). - Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - dp2.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Update(ctx, dp2, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - err = dynamicClient.Resource(common.DeploymentGVR).Namespace("default").Delete(ctx, dp2.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + ) handler1.AssertConsistently(g, time.Second*2) handler2.AssertConsistently(g, time.Second*2) // 5. 
Verify that handler1 and handler2 is not unregistered for other FTCs. - _, err = dynamicClient.Resource(common.SecretGVR). - Namespace("default"). - Create(ctx, getTestSecret("sc-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler1.ExpectAddEvents(secretGVK, 1) - handler2.ExpectAddEvents(secretGVK, 1) - - cm1.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Update(ctx, cm1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - handler1.ExpectUpdateEvents(configmapGVK, 1) - handler2.ExpectUpdateEvents(configmapGVK, 1) + generateEvents( + ctx, + g, + getTestSecret("sc-2", "default"), + dynamicClient.Resource(common.SecretGVR), + handler1, + handler2, + ) + + generateEvents( + ctx, + g, + getTestConfigMap("cm-2", "default"), + dynamicClient.Resource(common.ConfigMapGVR), + handler1, + handler2, + ) handler1.AssertEventually(g, time.Second*2) handler2.AssertEventually(g, time.Second*2) }) t.Run("event handlers should be unregistered on manager shutdown", func(t *testing.T) { t.Parallel() - g := gomega.NewWithT(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // 1. Bootstrap environment @@ -647,24 +688,28 @@ func TestInformerManager(t *testing.T) { generators := []*EventHandlerGenerator{generator1, generator2} managerCtx, managerCancel := context.WithCancel(ctx) - _, dynamicClient, _ := boostrapInformerManagerWithFakeClients(g, managerCtx, defaultFTCs, defaultObjs, generators) + + ctx, cancel := context.WithCancel(ctx) + manager, dynamicClient, _ := bootstrapInformerManagerWithFakeClients(g, managerCtx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() // 2. Verify that handler1 and handler2 is registered initially for all FTCs - handler1.ExpectGenerateEvents(deploymentFTC.Name, 1) - handler1.ExpectGenerateEvents(configmapFTC.Name, 1) - handler1.ExpectGenerateEvents(secretFTC.Name, 1) - handler1.ExpectAddEvents(deploymentGVK, 1) - handler1.ExpectAddEvents(configmapGVK, 1) - handler1.ExpectAddEvents(secretGVK, 1) - handler1.AssertEventually(g, time.Second*2) + for _, ftc := range defaultFTCs { + handler1.ExpectGenerateEvents(ftc.Name, 1) + handler2.ExpectGenerateEvents(ftc.Name, 1) + } + + for _, obj := range defaultObjs { + gvk := mustParseObject(obj) + handler1.ExpectAddEvents(gvk, 1) + handler2.ExpectAddEvents(gvk, 1) + } - handler2.ExpectGenerateEvents(deploymentFTC.Name, 1) - handler2.ExpectGenerateEvents(configmapFTC.Name, 1) - handler2.ExpectGenerateEvents(secretFTC.Name, 1) - handler2.ExpectAddEvents(deploymentGVK, 1) - handler2.ExpectAddEvents(configmapGVK, 1) - handler2.ExpectAddEvents(secretGVK, 1) + handler1.AssertEventually(g, time.Second*2) handler2.AssertEventually(g, time.Second*2) // 3. Shutdown the manager @@ -674,24 +719,33 @@ func TestInformerManager(t *testing.T) { managerCancel() <-time.After(time.Second) // 4. Verify that handler1 and handler2 is unregistered for all FTCs and no more events are received - _, err := dynamicClient.Resource(common.DeploymentGVR). - Namespace("default").
- Create(ctx, getTestDeployment("dp-2", "default"), metav1.CreateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - err = dynamicClient.Resource(common.ConfigMapGVR).Namespace("default").Delete(ctx, cm1.GetName(), metav1.DeleteOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) - - sc1.SetAnnotations(map[string]string{"test": "test"}) - _, err = dynamicClient.Resource(common.SecretGVR).Namespace("default").Update(ctx, sc1, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + generateEvents( + ctx, + g, + getTestDeployment("dp-2", "default"), + dynamicClient.Resource(common.DeploymentGVR), + ) + + generateEvents( + ctx, + g, + getTestDeployment("sc-2", "default"), + dynamicClient.Resource(common.SecretGVR), + ) + + generateEvents( + ctx, + g, + getTestDeployment("cm-2", "default"), + dynamicClient.Resource(common.ConfigMapGVR), + ) handler1.AssertConsistently(g, time.Second*2) handler2.AssertConsistently(g, time.Second*2) }) } -func boostrapInformerManagerWithFakeClients( +func bootstrapInformerManagerWithFakeClients( g *gomega.WithT, ctx context.Context, ftcs []*fedcorev1a1.FederatedTypeConfig, @@ -723,7 +777,8 @@ func boostrapInformerManagerWithFakeClients( informerManager := NewInformerManager(dynamicClient, factory.Core().V1alpha1().FederatedTypeConfigs()) for _, generator := range eventHandlerGenerators { - informerManager.AddEventHandlerGenerator(generator) + err := informerManager.AddEventHandlerGenerator(generator) + g.Expect(err).ToNot(gomega.HaveOccurred()) } factory.Start(ctx.Done()) diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index 0fd1b72d..d9d15910 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package informermanager import ( @@ -43,6 +59,9 @@ type InformerManager interface { // Starts processing FederatedTypeConfig events. Start(ctx context.Context) + + // Returns true if the InformerManager is gracefully shutdown. + IsShutdown() bool } // ClusterEventHandler can be registered by controllers to hook into the cluster events received by the @@ -93,6 +112,9 @@ type FederatedInformerManager interface { // Starts processing FederatedTypeConfig and FederatedCluster events. Start(ctx context.Context) + + // Returns true if the InformerManager is gracefully shutdown. + IsShutdown() bool } // ClusterClientGetter is used by the FederatedInformerManager to create clients for joined member clusters. @@ -102,5 +124,5 @@ type ClusterClientGetter struct { // cluster. ConnectionHash func(cluster *fedcorev1a1.FederatedCluster) ([]byte, error) // ClientGetter returns a dynamic client for the given member cluster. 
- ClientGetter func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) + ClientGetter func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) } diff --git a/pkg/util/informermanager/testutils_test.go b/pkg/util/informermanager/testutils_test.go index dde4c05b..9694840f 100644 --- a/pkg/util/informermanager/testutils_test.go +++ b/pkg/util/informermanager/testutils_test.go @@ -1,12 +1,30 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package informermanager import ( + "context" "fmt" "path" goruntime "runtime" "sync" "time" + "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" @@ -15,10 +33,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/rand" + dynamicclient "k8s.io/client-go/dynamic" "k8s.io/client-go/tools/cache" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/onsi/gomega" ) var ( @@ -83,8 +101,6 @@ var ( var ( deploymentGVK = appsv1.SchemeGroupVersion.WithKind("Deployment") daemonsetGVK = appsv1.SchemeGroupVersion.WithKind("DaemonSet") - configmapGVK = corev1.SchemeGroupVersion.WithKind("ConfigMap") - secretGVK = corev1.SchemeGroupVersion.WithKind("Secret") ) func getTestDeployment(name, namespace string) *unstructured.Unstructured { @@ -226,65 +242,84 @@ type countingResourceEventHandler struct { func (h *countingResourceEventHandler) ExpectGenerateEvents(ftcName string, n int) { h.lock.Lock() defer h.lock.Unlock() - h.expectedGenerateCount[ftcName] = h.expectedGenerateCount[ftcName] + n + h.expectedGenerateCount[ftcName] += n } func (h *countingResourceEventHandler) ExpectAddEvents(gvk schema.GroupVersionKind, n int) { h.lock.Lock() defer h.lock.Unlock() - h.expectedAddEventCount[gvk] = h.expectedAddEventCount[gvk] + n + h.expectedAddEventCount[gvk] += n } func (h *countingResourceEventHandler) ExpectUpdateEvents(gvk schema.GroupVersionKind, n int) { h.lock.Lock() defer h.lock.Unlock() - h.expectedUpdateEventCount[gvk] = h.expectedUpdateEventCount[gvk] + n + h.expectedUpdateEventCount[gvk] += n } func (h *countingResourceEventHandler) ExpectDeleteEvents(gvk schema.GroupVersionKind, n int) { h.lock.Lock() defer h.lock.Unlock() - h.expectedDeleteEventCount[gvk] = h.expectedDeleteEventCount[gvk] + n + h.expectedDeleteEventCount[gvk] += n } -func (h *countingResourceEventHandler) Assert(g gomega.Gomega) { - _, file, no, _ := goruntime.Caller(1) - callerInfo := fmt.Sprintf("%s:%d", path.Base(file), no) - +func (h *countingResourceEventHandler) assert(g gomega.Gomega, callerInfo string) { for ftc := range h.expectedGenerateCount { - g.Expect(h.generateCount[ftc]). 
- To(gomega.BeNumerically("==", h.expectedGenerateCount[ftc]), "%s: incorrect number of generate events for %s", callerInfo, ftc) + g.Expect(h.generateCount[ftc]).To( + gomega.BeNumerically("==", h.expectedGenerateCount[ftc]), + "%s: incorrect number of generate events for %s", + callerInfo, + ftc, + ) } for gvk := range h.expectedAddEventCount { - g.Expect(h.addEventCount[gvk]). - To(gomega.BeNumerically("==", h.expectedAddEventCount[gvk]), "%s: incorrect number of add events for %s", callerInfo, gvk) + g.Expect(h.addEventCount[gvk]).To( + gomega.BeNumerically("==", h.expectedAddEventCount[gvk]), + "%s: incorrect number of add events for %s", + callerInfo, + gvk, + ) } for gvk := range h.expectedUpdateEventCount { - g.Expect(h.updateEventCount[gvk]). - To(gomega.BeNumerically("==", h.expectedUpdateEventCount[gvk]), "%s: incorrect number of update events for %s", callerInfo, gvk) + g.Expect(h.updateEventCount[gvk]).To( + gomega.BeNumerically("==", h.expectedUpdateEventCount[gvk]), + "%s: incorrect number of update events for %s", + callerInfo, + gvk, + ) } for gvk := range h.expectedDeleteEventCount { - g.Expect(h.deleteEventCount[gvk]). - To(gomega.BeNumerically("==", h.expectedDeleteEventCount[gvk]), "%s: incorrect number of delete events for %s", callerInfo, gvk) + g.Expect(h.deleteEventCount[gvk]).To( + gomega.BeNumerically("==", h.expectedDeleteEventCount[gvk]), + "%s: incorrect number of delete events for %s", + callerInfo, + gvk, + ) } } func (h *countingResourceEventHandler) AssertEventually(g gomega.Gomega, timeout time.Duration) { + _, file, no, _ := goruntime.Caller(1) + callerInfo := fmt.Sprintf("%s:%d", path.Base(file), no) + g.Eventually(func(g gomega.Gomega) { - h.Assert(g) + h.assert(g, callerInfo) }).WithTimeout(timeout).Should(gomega.Succeed()) } func (h *countingResourceEventHandler) AssertConsistently(g gomega.Gomega, timeout time.Duration) { + _, file, no, _ := goruntime.Caller(1) + callerInfo := fmt.Sprintf("%s:%d", path.Base(file), no) + g.Consistently(func(g gomega.Gomega) { - h.Assert(g) + h.assert(g, callerInfo) }).WithTimeout(timeout).Should(gomega.Succeed()) } func (h *countingResourceEventHandler) GenerateEventHandler(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { h.lock.Lock() defer h.lock.Unlock() - h.generateCount[ftc.Name] = h.generateCount[ftc.Name] + 1 + h.generateCount[ftc.Name]++ return h } @@ -292,27 +327,27 @@ func (h *countingResourceEventHandler) OnAdd(obj interface{}) { h.lock.Lock() defer h.lock.Unlock() - gvk := h.mustParseObject(obj) - h.addEventCount[gvk] = h.addEventCount[gvk] + 1 + gvk := mustParseObject(obj) + h.addEventCount[gvk]++ } func (h *countingResourceEventHandler) OnDelete(obj interface{}) { h.lock.Lock() defer h.lock.Unlock() - gvk := h.mustParseObject(obj) - h.deleteEventCount[gvk] = h.deleteEventCount[gvk] + 1 + gvk := mustParseObject(obj) + h.deleteEventCount[gvk]++ } func (h *countingResourceEventHandler) OnUpdate(_ interface{}, obj interface{}) { h.lock.Lock() defer h.lock.Unlock() - gvk := h.mustParseObject(obj) - h.updateEventCount[gvk] = h.updateEventCount[gvk] + 1 + gvk := mustParseObject(obj) + h.updateEventCount[gvk]++ } -func (h *countingResourceEventHandler) mustParseObject(obj interface{}) schema.GroupVersionKind { +func mustParseObject(obj interface{}) schema.GroupVersionKind { uns := obj.(*unstructured.Unstructured) gv, err := schema.ParseGroupVersion(uns.GetAPIVersion()) if err != nil { @@ -335,6 +370,11 @@ func registerOncePredicate(old, _ *fedcorev1a1.FederatedTypeConfig) bool { return old == 
nil } +var ( + predicateTrue = "true" + predicateFalse = "false" +) + func newAnnotationBasedGenerator(handler *countingResourceEventHandler) *EventHandlerGenerator { return &EventHandlerGenerator{ Predicate: func(_, latest *fedcorev1a1.FederatedTypeConfig) bool { @@ -348,3 +388,32 @@ func newAnnotationBasedGenerator(handler *countingResourceEventHandler) *EventHa }, } } + +func generateEvents( + ctx context.Context, + g *gomega.WithT, + obj *unstructured.Unstructured, + client dynamicclient.NamespaceableResourceInterface, + handlers ...*countingResourceEventHandler, +) { + gvk := mustParseObject(obj) + + obj, err := client.Namespace(obj.GetNamespace()).Create(ctx, obj, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + for _, handler := range handlers { + handler.ExpectAddEvents(gvk, 1) + } + + obj.SetAnnotations(map[string]string{"test": "test"}) + _, err = client.Namespace(obj.GetNamespace()).Update(ctx, obj, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + for _, handler := range handlers { + handler.ExpectUpdateEvents(gvk, 1) + } + + err = client.Namespace(obj.GetNamespace()).Delete(ctx, obj.GetName(), metav1.DeleteOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + for _, handler := range handlers { + handler.ExpectDeleteEvents(gvk, 1) + } +} From 50591a5893cf8ab621389cb334ac9d0560dc8546 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 18:01:37 +0800 Subject: [PATCH 025/173] reset rate limiter on successful worker reconciles --- pkg/util/informermanager/federatedinformermanager.go | 9 +++++++-- pkg/util/informermanager/informermanager.go | 9 +++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 3e5cbf99..d199725c 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -133,7 +133,8 @@ func (m *federatedInformerManager) worker(ctx context.Context) { if err := m.processClusterDeletion(ctx, name); err != nil { logger.Error(err, "Failed to process FederatedCluster, will retry") m.queue.AddRateLimited(key) - return + } else { + m.queue.Forget(key) } return } @@ -142,12 +143,16 @@ func (m *federatedInformerManager) worker(ctx context.Context) { if err != nil { if needReenqueue { logger.Error(err, "Failed to process FederatedCluster, will retry") + m.queue.AddRateLimited(key) } else { logger.Error(err, "Failed to process FederatedCluster") } + return } + + m.queue.Forget(key) if needReenqueue { - m.queue.AddRateLimited(key) + m.queue.Add(key) } } diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 9c5afe37..f7b080d3 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -116,7 +116,8 @@ func (m *informerManager) worker(ctx context.Context) { if err := m.processFTCDeletion(ctx, name); err != nil { logger.Error(err, "Failed to process FederatedTypeConfig, will retry") m.queue.AddRateLimited(key) - return + } else { + m.queue.Forget(key) } return } @@ -130,12 +131,16 @@ func (m *informerManager) worker(ctx context.Context) { if err != nil { if needReenqueue { logger.Error(err, "Failed to process FederatedTypeConfig, will retry") + m.queue.AddRateLimited(key) } else { logger.Error(err, "Failed to process FederatedTypeConfig") } + return } + + m.queue.Forget(key) if needReenqueue { - m.queue.AddRateLimited(key) +
m.queue.Add(key) } } From f7edcf6bfc1103a2bc0a4e82cf8141ba742467f3 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 18:38:29 +0800 Subject: [PATCH 026/173] fix race --- .../extensions_federatedtypeconfig.go | 10 + pkg/controllers/util/overrides.go | 232 ++++++++++++++++++ .../federatedinformermanager_test.go | 13 +- pkg/util/informermanager/informermanager.go | 4 +- .../informermanager/informermanager_test.go | 7 +- pkg/util/informermanager/testutils_test.go | 3 + 6 files changed, 252 insertions(+), 17 deletions(-) create mode 100644 pkg/controllers/util/overrides.go diff --git a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go index a2e3b3b5..f1634890 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go @@ -23,6 +23,7 @@ package v1alpha1 import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" ) const ( @@ -45,6 +46,15 @@ func (f *FederatedTypeConfig) GetSourceType() metav1.APIResource { return apiResourceToMeta(f.Spec.SourceType) } +func (f *FederatedTypeConfig) GetSourceTypeGVR() schema.GroupVersionResource { + apiResource := f.GetSourceType() + return schema.GroupVersionResource{ + Group: apiResource.Group, + Version: apiResource.Version, + Resource: apiResource.Name, + } +} + func (f *FederatedTypeConfig) GetStatusCollectionEnabled() bool { return f.Spec.StatusCollection != nil } diff --git a/pkg/controllers/util/overrides.go b/pkg/controllers/util/overrides.go new file mode 100644 index 00000000..6d185fb9 --- /dev/null +++ b/pkg/controllers/util/overrides.go @@ -0,0 +1,232 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This file may have been modified by The KubeAdmiral Authors +("KubeAdmiral Modifications"). All KubeAdmiral Modifications +are Copyright 2023 The KubeAdmiral Authors. +*/ + +package util + +//import ( +// "encoding/json" +// "sort" + +// jsonpatch "github.com/evanphx/json-patch" +// "github.com/pkg/errors" +// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +// "k8s.io/apimachinery/pkg/util/sets" + +// fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" +// "github.com/kubewharf/kubeadmiral/pkg/controllers/common" +//) + +//// Namespace and name may not be overridden since these fields are the +//// primary mechanism of association between a federated resource in +//// the host cluster and the target resources in the member clusters. +//// +//// Kind should always be sourced from the FTC and not vary across +//// member clusters. +//// +//// apiVersion can be overridden to support managing resources like +//// Ingress which can exist in different groups at different +//// versions. Users will need to take care not to abuse this +//// capability. 
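The worker rewrites in patch 025 above follow the usual client-go workqueue contract: AddRateLimited on failure so retries back off, Forget on success so the per-key rate limiter resets, and a plain Add for a deliberate backoff-free requeue. A condensed sketch of the shape both workers now share, assuming a reconcile helper that reports a requeue flag plus an error (names hypothetical):

func worker(queue workqueue.RateLimitingInterface) {
	key, shutdown := queue.Get()
	if shutdown {
		return
	}
	defer queue.Done(key)

	needReenqueue, err := reconcile(key)
	if err != nil {
		queue.AddRateLimited(key) // failed: retry with exponential backoff
		return
	}
	queue.Forget(key) // succeeded: reset this key's backoff state
	if needReenqueue {
		queue.Add(key) // requeue immediately, without backoff
	}
}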
+//var invalidPaths = sets.NewString( +// "/metadata/namespace", +// "/metadata/name", +// "/metadata/generateName", +// "/kind", +//) + +//// Mapping of clusterName to overrides for the cluster +//type OverridesMap map[string]fedtypesv1a1.OverridePatches + +//func UnmarshalGenericOverrides(uns *unstructured.Unstructured) (*fedtypesv1a1.GenericObjectWithOverrides, error) { +// obj := &fedtypesv1a1.GenericObjectWithOverrides{} +// err := UnstructuredToInterface(uns, obj) +// if err != nil { +// return nil, err +// } +// return obj, nil +//} + +//// GetOverrides returns a map of overrides populated from the given +//// unstructured object. +//func GetOverrides(rawObj *unstructured.Unstructured, controller string) (OverridesMap, error) { +// overridesMap := make(OverridesMap) + +// if rawObj == nil { +// return overridesMap, nil +// } + +// overrideObj, err := UnmarshalGenericOverrides(rawObj) +// if err != nil { +// return nil, err +// } + +// if overrideObj.Spec == nil || overrideObj.Spec.Overrides == nil { +// // No overrides defined for the federated type +// return overridesMap, nil +// } + +// overrides := overrideObj.Spec.Overrides +// var clusterOverrides []fedtypesv1a1.ClusterOverride +// for i := range overrides { +// if overrides[i].Controller == controller { +// clusterOverrides = overrides[i].Clusters +// break +// } +// } + +// if clusterOverrides == nil { +// return overridesMap, nil +// } + +// for _, overrideItem := range clusterOverrides { +// clusterName := overrideItem.ClusterName +// if _, ok := overridesMap[clusterName]; ok { +// return nil, errors.Errorf("cluster %q appears more than once", clusterName) +// } + +// for i, pathEntry := range overrideItem.Patches { +// path := pathEntry.Path +// if invalidPaths.Has(path) { +// return nil, errors.Errorf("override[%d] for cluster %q has an invalid path: %s", i, clusterName, path) +// } +// } +// overridesMap[clusterName] = overrideItem.Patches +// } + +// return overridesMap, nil +//} + +//// SetOverrides sets the spec.overrides field of the unstructured +//// object from the provided overrides map. +//// +//// This function takes ownership of the `overridesMap` and may mutate it arbitrarily. +//func SetOverrides(uns *unstructured.Unstructured, controller string, overridesMap OverridesMap) error { +// for clusterName, clusterOverrides := range overridesMap { +// if len(clusterOverrides) == 0 { +// delete(overridesMap, clusterName) +// } +// } + +// overrideObj, err := UnmarshalGenericOverrides(uns) +// if err != nil { +// return err +// } + +// index := -1 +// for i, overrides := range overrideObj.Spec.Overrides { +// if overrides.Controller == controller { +// index = i +// break +// } +// } + +// if len(overridesMap) == 0 { +// // delete index +// if index != -1 { +// overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides[:index], overrideObj.Spec.Overrides[(index+1):]...) 
+// } +// } else { +// if index == -1 { +// index = len(overrideObj.Spec.Overrides) +// overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides, fedtypesv1a1.ControllerOverride{ +// Controller: controller, +// }) +// } + +// overrides := &overrideObj.Spec.Overrides[index] +// overrides.Clusters = nil + +// // Write in ascending order of cluster names for better readability +// clusterNames := make([]string, 0, len(overridesMap)) +// for clusterName := range overridesMap { +// clusterNames = append(clusterNames, clusterName) +// } +// sort.Strings(clusterNames) +// for _, clusterName := range clusterNames { +// clusterOverrides := overridesMap[clusterName] +// overrides.Clusters = append(overrides.Clusters, fedtypesv1a1.ClusterOverride{ +// ClusterName: clusterName, +// Patches: clusterOverrides, +// }) +// } +// } + +// overridesUns, err := InterfaceToUnstructured(overrideObj.Spec.Overrides) +// if err != nil { +// return err +// } + +// return unstructured.SetNestedField(uns.Object, overridesUns, common.OverridesPath...) +//} + +//// UnstructuredToInterface converts an unstructured object to the +//// provided interface by json marshalling/unmarshalling. +//func UnstructuredToInterface(rawObj *unstructured.Unstructured, obj interface{}) error { +// content, err := rawObj.MarshalJSON() +// if err != nil { +// return err +// } +// return json.Unmarshal(content, obj) +//} + +//// InterfaceToUnstructured converts the provided object to an +//// unstructured by json marshalling/unmarshalling. +//func InterfaceToUnstructured(obj interface{}) (ret interface{}, err error) { +// var buf []byte +// buf, err = json.Marshal(obj) +// if err != nil { +// return +// } + +// err = json.Unmarshal(buf, &ret) +// return +//} + +//// ApplyJsonPatch applies the override on to the given unstructured object. +//func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedtypesv1a1.OverridePatches) error { +// // TODO: Do the defaulting of "op" field to "replace" in API defaulting +// for i, overrideItem := range overrides { +// if overrideItem.Op == "" { +// overrides[i].Op = "replace" +// } +// } +// jsonPatchBytes, err := json.Marshal(overrides) +// if err != nil { +// return err +// } + +// patch, err := jsonpatch.DecodePatch(jsonPatchBytes) +// if err != nil { +// return err +// } + +// ObjectJSONBytes, err := obj.MarshalJSON() +// if err != nil { +// return err +// } + +// patchedObjectJSONBytes, err := patch.Apply(ObjectJSONBytes) +// if err != nil { +// return err +// } + +// err = obj.UnmarshalJSON(patchedObjectJSONBytes) +// return err +//} diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index 2b10333a..1053fbc8 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -39,7 +39,6 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/fake" fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) //nolint:gocyclo @@ -182,8 +181,7 @@ func TestFederatedInformerManager(t *testing.T) { // 2. 
Verify that listers for existing FTCs and clusters are eventually available for _, ftc := range defaultFTCs { - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) + gvr := ftc.GetSourceTypeGVR() for _, cluster := range defaultClusters { g.Eventually(func(g gomega.Gomega) { @@ -246,8 +244,7 @@ func TestFederatedInformerManager(t *testing.T) { }() ftc := daemonsetFTC - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) + gvr := ftc.GetSourceTypeGVR() // 2. Verify that listers for daemonsets FTCs is not available at the start @@ -310,8 +307,7 @@ func TestFederatedInformerManager(t *testing.T) { g.Consistently(func(g gomega.Gomega) { for _, ftc := range defaultFTCs { - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) + gvr := ftc.GetSourceTypeGVR() lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name) g.Expect(exists).To(gomega.BeFalse()) @@ -329,8 +325,7 @@ func TestFederatedInformerManager(t *testing.T) { g.Eventually(func(g gomega.Gomega) { for _, ftc := range defaultFTCs { - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) + gvr := ftc.GetSourceTypeGVR() lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name) g.Expect(exists).To(gomega.BeTrue()) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index f7b080d3..9ab6c82b 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -35,7 +35,6 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" "github.com/kubewharf/kubeadmiral/pkg/util/bijection" ) @@ -149,8 +148,7 @@ func (m *informerManager) processFTC(ctx context.Context, ftc *fedcorev1a1.Feder defer m.lock.Unlock() ftcName := ftc.Name - apiResource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiResource) + gvr := ftc.GetSourceTypeGVR() logger := klog.FromContext(ctx).WithValues("gvr", gvr.String()) ctx = klog.NewContext(ctx, logger) diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 17ac4650..679f9b70 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -39,7 +39,6 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/fake" fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) func TestInformerManager(t *testing.T) { @@ -66,8 +65,7 @@ func TestInformerManager(t *testing.T) { // 2. 
Verify that the listers for each FTC is eventually available for _, ftc := range defaultFTCs { - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) + gvr := ftc.GetSourceTypeGVR() g.Eventually(func(g gomega.Gomega) { lister, informerSynced, exists := manager.GetResourceLister(gvr) @@ -103,8 +101,7 @@ func TestInformerManager(t *testing.T) { }() ftc := daemonsetFTC - apiresource := ftc.GetSourceType() - gvr := schemautil.APIResourceToGVR(&apiresource) + gvr := ftc.GetSourceTypeGVR() // 2. Verify that the lister for daemonsets is not available at the start diff --git a/pkg/util/informermanager/testutils_test.go b/pkg/util/informermanager/testutils_test.go index 9694840f..044b6579 100644 --- a/pkg/util/informermanager/testutils_test.go +++ b/pkg/util/informermanager/testutils_test.go @@ -264,6 +264,9 @@ func (h *countingResourceEventHandler) ExpectDeleteEvents(gvk schema.GroupVersio } func (h *countingResourceEventHandler) assert(g gomega.Gomega, callerInfo string) { + h.lock.RLock() + defer h.lock.RUnlock() + for ftc := range h.expectedGenerateCount { g.Expect(h.generateCount[ftc]).To( gomega.BeNumerically("==", h.expectedGenerateCount[ftc]), From ff75595457a1870f25060d6ae16ad8457bdb3420 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 18:58:34 +0800 Subject: [PATCH 027/173] fix race --- .../federatedinformermanager_test.go | 32 +++++++++++-------- .../informermanager/informermanager_test.go | 4 +-- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index 1053fbc8..aaf0b9cc 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -18,6 +18,7 @@ package informermanager import ( "context" + "sync/atomic" "testing" "time" @@ -1318,9 +1319,10 @@ func TestFederatedInformerManager(t *testing.T) { // 1. Bootstrap environment var generation int64 = 1 - var callBackCount int64 = 0 var expectedCallbackCount int64 = 1 + callBackCount := &atomic.Int64{} + // assertionCh is used to achieve 2 things: // 1. It is used to pass assertions to the main goroutine. // 2. It is used as an implicit lock to ensure FTC events are not squashed by the InformerManager. @@ -1347,7 +1349,7 @@ func TestFederatedInformerManager(t *testing.T) { return newCluster.GetAnnotations()["predicate"] == predicateTrue }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { - callBackCount++ + callBackCount.Add(1) }, } @@ -1379,7 +1381,7 @@ func TestFederatedInformerManager(t *testing.T) { fn := <-assertionCh fn() - g.Expect(callBackCount).To(gomega.Equal(expectedCallbackCount)) + g.Expect(callBackCount.Load()).To(gomega.Equal(expectedCallbackCount)) // 3. 
Generate cluster update events @@ -1400,7 +1402,7 @@ func TestFederatedInformerManager(t *testing.T) { fn = <-assertionCh fn() - g.Expect(callBackCount).To(gomega.Equal(expectedCallbackCount)) + g.Expect(callBackCount.Load()).To(gomega.Equal(expectedCallbackCount)) } }) } @@ -1425,26 +1427,28 @@ func bootstrapFederatedInformerManagerWithFakeClients( fedObjects := []runtime.Object{} for _, cluster := range clusters { - fedObjects = append(fedObjects, runtime.Object(cluster)) + fedObjects = append(fedObjects, runtime.Object(cluster.DeepCopy())) } for _, ftc := range ftcs { - fedObjects = append(fedObjects, runtime.Object(ftc)) + fedObjects = append(fedObjects, runtime.Object(ftc.DeepCopy())) } fedClient := fake.NewSimpleClientset(fedObjects...) factory := fedinformers.NewSharedInformerFactory(fedClient, 0) + + dynamicObjects := map[string][]runtime.Object{} + for cluster, unsObjects := range objects { + dynamicObjects[cluster] = make([]runtime.Object, len(unsObjects)) + for i, unsObject := range unsObjects { + dynamicObjects[cluster][i] = runtime.Object(unsObject.DeepCopy()) + } + } + informerManager := NewFederatedInformerManager( ClusterClientGetter{ ConnectionHash: DefaultClusterConnectionHash, ClientGetter: func(cluster *fedcorev1a1.FederatedCluster) (dynamicclient.Interface, error) { - dynamicObjects := []runtime.Object{} - - clusterObjects := objects[cluster.Name] - for _, object := range clusterObjects { - dynamicObjects = append(dynamicObjects, runtime.Object(object)) - } - - return dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects...), nil + return dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects[cluster.Name]...), nil }, }, factory.Core().V1alpha1().FederatedTypeConfigs(), diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 679f9b70..0441860b 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -760,13 +760,13 @@ func bootstrapInformerManagerWithFakeClients( dynamicObjects := []runtime.Object{} for _, object := range objects { - dynamicObjects = append(dynamicObjects, runtime.Object(object)) + dynamicObjects = append(dynamicObjects, runtime.Object(object.DeepCopy())) } dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects...) fedObjects := []runtime.Object{} for _, ftc := range ftcs { - fedObjects = append(fedObjects, runtime.Object(ftc)) + fedObjects = append(fedObjects, runtime.Object(ftc.DeepCopy())) } fedClient := fake.NewSimpleClientset(fedObjects...) From b01676df5eea23aa1a29c7dfe217a5db9fd9e120 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 19:06:20 +0800 Subject: [PATCH 028/173] uncomment files --- pkg/controllers/util/overrides.go | 420 ++--- pkg/controllers/util/propagatedversion.go | 270 ++-- pkg/controllers/util/rolloutplan.go | 1698 ++++++++++----------- 3 files changed, 1194 insertions(+), 1194 deletions(-) diff --git a/pkg/controllers/util/overrides.go b/pkg/controllers/util/overrides.go index 6d185fb9..383fb3cf 100644 --- a/pkg/controllers/util/overrides.go +++ b/pkg/controllers/util/overrides.go @@ -20,213 +20,213 @@ are Copyright 2023 The KubeAdmiral Authors. 
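The DeepCopy calls introduced in patch 027 are the substance of that race fix: the fake clients' object trackers keep references to the seeded objects while the tests go on mutating the originals (for example via SetAnnotations), so sharing one pointer races. A minimal sketch of the safe seeding pattern, assuming a runtime scheme with the test types registered:

seed := make([]runtime.Object, 0, len(objects))
for _, obj := range objects {
	// Hand the tracker its own copy; the test keeps the original.
	seed = append(seed, runtime.Object(obj.DeepCopy()))
}
dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, seed...)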
package util -//import ( -// "encoding/json" -// "sort" - -// jsonpatch "github.com/evanphx/json-patch" -// "github.com/pkg/errors" -// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -// "k8s.io/apimachinery/pkg/util/sets" - -// fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" -// "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -//) - -//// Namespace and name may not be overridden since these fields are the -//// primary mechanism of association between a federated resource in -//// the host cluster and the target resources in the member clusters. -//// -//// Kind should always be sourced from the FTC and not vary across -//// member clusters. -//// -//// apiVersion can be overridden to support managing resources like -//// Ingress which can exist in different groups at different -//// versions. Users will need to take care not to abuse this -//// capability. -//var invalidPaths = sets.NewString( -// "/metadata/namespace", -// "/metadata/name", -// "/metadata/generateName", -// "/kind", -//) - -//// Mapping of clusterName to overrides for the cluster -//type OverridesMap map[string]fedtypesv1a1.OverridePatches - -//func UnmarshalGenericOverrides(uns *unstructured.Unstructured) (*fedtypesv1a1.GenericObjectWithOverrides, error) { -// obj := &fedtypesv1a1.GenericObjectWithOverrides{} -// err := UnstructuredToInterface(uns, obj) -// if err != nil { -// return nil, err -// } -// return obj, nil -//} - -//// GetOverrides returns a map of overrides populated from the given -//// unstructured object. -//func GetOverrides(rawObj *unstructured.Unstructured, controller string) (OverridesMap, error) { -// overridesMap := make(OverridesMap) - -// if rawObj == nil { -// return overridesMap, nil -// } - -// overrideObj, err := UnmarshalGenericOverrides(rawObj) -// if err != nil { -// return nil, err -// } - -// if overrideObj.Spec == nil || overrideObj.Spec.Overrides == nil { -// // No overrides defined for the federated type -// return overridesMap, nil -// } - -// overrides := overrideObj.Spec.Overrides -// var clusterOverrides []fedtypesv1a1.ClusterOverride -// for i := range overrides { -// if overrides[i].Controller == controller { -// clusterOverrides = overrides[i].Clusters -// break -// } -// } - -// if clusterOverrides == nil { -// return overridesMap, nil -// } - -// for _, overrideItem := range clusterOverrides { -// clusterName := overrideItem.ClusterName -// if _, ok := overridesMap[clusterName]; ok { -// return nil, errors.Errorf("cluster %q appears more than once", clusterName) -// } - -// for i, pathEntry := range overrideItem.Patches { -// path := pathEntry.Path -// if invalidPaths.Has(path) { -// return nil, errors.Errorf("override[%d] for cluster %q has an invalid path: %s", i, clusterName, path) -// } -// } -// overridesMap[clusterName] = overrideItem.Patches -// } - -// return overridesMap, nil -//} - -//// SetOverrides sets the spec.overrides field of the unstructured -//// object from the provided overrides map. -//// -//// This function takes ownership of the `overridesMap` and may mutate it arbitrarily. 
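As orientation for the overrides helpers being restored in this file: an OverridesMap keys a list of JSON Patch operations by member cluster name. A hypothetical map that pins replicas in a single cluster, assuming OverridePatch.Value accepts an arbitrary JSON-compatible value:

overridesMap := OverridesMap{
	"member-1": fedtypesv1a1.OverridePatches{
		{Op: "replace", Path: "/spec/replicas", Value: 5},
	},
}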
-//func SetOverrides(uns *unstructured.Unstructured, controller string, overridesMap OverridesMap) error { -// for clusterName, clusterOverrides := range overridesMap { -// if len(clusterOverrides) == 0 { -// delete(overridesMap, clusterName) -// } -// } - -// overrideObj, err := UnmarshalGenericOverrides(uns) -// if err != nil { -// return err -// } - -// index := -1 -// for i, overrides := range overrideObj.Spec.Overrides { -// if overrides.Controller == controller { -// index = i -// break -// } -// } - -// if len(overridesMap) == 0 { -// // delete index -// if index != -1 { -// overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides[:index], overrideObj.Spec.Overrides[(index+1):]...) -// } -// } else { -// if index == -1 { -// index = len(overrideObj.Spec.Overrides) -// overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides, fedtypesv1a1.ControllerOverride{ -// Controller: controller, -// }) -// } - -// overrides := &overrideObj.Spec.Overrides[index] -// overrides.Clusters = nil - -// // Write in ascending order of cluster names for better readability -// clusterNames := make([]string, 0, len(overridesMap)) -// for clusterName := range overridesMap { -// clusterNames = append(clusterNames, clusterName) -// } -// sort.Strings(clusterNames) -// for _, clusterName := range clusterNames { -// clusterOverrides := overridesMap[clusterName] -// overrides.Clusters = append(overrides.Clusters, fedtypesv1a1.ClusterOverride{ -// ClusterName: clusterName, -// Patches: clusterOverrides, -// }) -// } -// } - -// overridesUns, err := InterfaceToUnstructured(overrideObj.Spec.Overrides) -// if err != nil { -// return err -// } - -// return unstructured.SetNestedField(uns.Object, overridesUns, common.OverridesPath...) -//} - -//// UnstructuredToInterface converts an unstructured object to the -//// provided interface by json marshalling/unmarshalling. -//func UnstructuredToInterface(rawObj *unstructured.Unstructured, obj interface{}) error { -// content, err := rawObj.MarshalJSON() -// if err != nil { -// return err -// } -// return json.Unmarshal(content, obj) -//} - -//// InterfaceToUnstructured converts the provided object to an -//// unstructured by json marshalling/unmarshalling. -//func InterfaceToUnstructured(obj interface{}) (ret interface{}, err error) { -// var buf []byte -// buf, err = json.Marshal(obj) -// if err != nil { -// return -// } - -// err = json.Unmarshal(buf, &ret) -// return -//} - -//// ApplyJsonPatch applies the override on to the given unstructured object. 
-//func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedtypesv1a1.OverridePatches) error { -// // TODO: Do the defaulting of "op" field to "replace" in API defaulting -// for i, overrideItem := range overrides { -// if overrideItem.Op == "" { -// overrides[i].Op = "replace" -// } -// } -// jsonPatchBytes, err := json.Marshal(overrides) -// if err != nil { -// return err -// } - -// patch, err := jsonpatch.DecodePatch(jsonPatchBytes) -// if err != nil { -// return err -// } - -// ObjectJSONBytes, err := obj.MarshalJSON() -// if err != nil { -// return err -// } - -// patchedObjectJSONBytes, err := patch.Apply(ObjectJSONBytes) -// if err != nil { -// return err -// } - -// err = obj.UnmarshalJSON(patchedObjectJSONBytes) -// return err -//} +import ( + "encoding/json" + "sort" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + + fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" +) + +// Namespace and name may not be overridden since these fields are the +// primary mechanism of association between a federated resource in +// the host cluster and the target resources in the member clusters. +// +// Kind should always be sourced from the FTC and not vary across +// member clusters. +// +// apiVersion can be overridden to support managing resources like +// Ingress which can exist in different groups at different +// versions. Users will need to take care not to abuse this +// capability. +var invalidPaths = sets.NewString( + "/metadata/namespace", + "/metadata/name", + "/metadata/generateName", + "/kind", +) + +// Mapping of clusterName to overrides for the cluster +type OverridesMap map[string]fedtypesv1a1.OverridePatches + +func UnmarshalGenericOverrides(uns *unstructured.Unstructured) (*fedtypesv1a1.GenericObjectWithOverrides, error) { + obj := &fedtypesv1a1.GenericObjectWithOverrides{} + err := UnstructuredToInterface(uns, obj) + if err != nil { + return nil, err + } + return obj, nil +} + +// GetOverrides returns a map of overrides populated from the given +// unstructured object. 
+func GetOverrides(rawObj *unstructured.Unstructured, controller string) (OverridesMap, error) { + overridesMap := make(OverridesMap) + + if rawObj == nil { + return overridesMap, nil + } + + overrideObj, err := UnmarshalGenericOverrides(rawObj) + if err != nil { + return nil, err + } + + if overrideObj.Spec == nil || overrideObj.Spec.Overrides == nil { + // No overrides defined for the federated type + return overridesMap, nil + } + + overrides := overrideObj.Spec.Overrides + var clusterOverrides []fedtypesv1a1.ClusterOverride + for i := range overrides { + if overrides[i].Controller == controller { + clusterOverrides = overrides[i].Clusters + break + } + } + + if clusterOverrides == nil { + return overridesMap, nil + } + + for _, overrideItem := range clusterOverrides { + clusterName := overrideItem.ClusterName + if _, ok := overridesMap[clusterName]; ok { + return nil, errors.Errorf("cluster %q appears more than once", clusterName) + } + + for i, pathEntry := range overrideItem.Patches { + path := pathEntry.Path + if invalidPaths.Has(path) { + return nil, errors.Errorf("override[%d] for cluster %q has an invalid path: %s", i, clusterName, path) + } + } + overridesMap[clusterName] = overrideItem.Patches + } + + return overridesMap, nil +} + +// SetOverrides sets the spec.overrides field of the unstructured +// object from the provided overrides map. +// +// This function takes ownership of the `overridesMap` and may mutate it arbitrarily. +func SetOverrides(uns *unstructured.Unstructured, controller string, overridesMap OverridesMap) error { + for clusterName, clusterOverrides := range overridesMap { + if len(clusterOverrides) == 0 { + delete(overridesMap, clusterName) + } + } + + overrideObj, err := UnmarshalGenericOverrides(uns) + if err != nil { + return err + } + + index := -1 + for i, overrides := range overrideObj.Spec.Overrides { + if overrides.Controller == controller { + index = i + break + } + } + + if len(overridesMap) == 0 { + // delete index + if index != -1 { + overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides[:index], overrideObj.Spec.Overrides[(index+1):]...) + } + } else { + if index == -1 { + index = len(overrideObj.Spec.Overrides) + overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides, fedtypesv1a1.ControllerOverride{ + Controller: controller, + }) + } + + overrides := &overrideObj.Spec.Overrides[index] + overrides.Clusters = nil + + // Write in ascending order of cluster names for better readability + clusterNames := make([]string, 0, len(overridesMap)) + for clusterName := range overridesMap { + clusterNames = append(clusterNames, clusterName) + } + sort.Strings(clusterNames) + for _, clusterName := range clusterNames { + clusterOverrides := overridesMap[clusterName] + overrides.Clusters = append(overrides.Clusters, fedtypesv1a1.ClusterOverride{ + ClusterName: clusterName, + Patches: clusterOverrides, + }) + } + } + + overridesUns, err := InterfaceToUnstructured(overrideObj.Spec.Overrides) + if err != nil { + return err + } + + return unstructured.SetNestedField(uns.Object, overridesUns, common.OverridesPath...) +} + +// UnstructuredToInterface converts an unstructured object to the +// provided interface by json marshalling/unmarshalling. 
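GetOverrides and SetOverrides above form a read-modify-write pair over a single controller's entry in spec.overrides. A rough usage sketch, where fedObj is the federated object as *unstructured.Unstructured and the controller name is illustrative:

overrides, err := GetOverrides(fedObj, "my-controller")
if err != nil {
	return err
}
overrides["member-1"] = append(overrides["member-1"],
	fedtypesv1a1.OverridePatch{Op: "replace", Path: "/spec/replicas", Value: 3})
// SetOverrides takes ownership of the map, prunes empty cluster entries,
// and rewrites only this controller's section, in ascending cluster order.
return SetOverrides(fedObj, "my-controller", overrides)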
+func UnstructuredToInterface(rawObj *unstructured.Unstructured, obj interface{}) error { + content, err := rawObj.MarshalJSON() + if err != nil { + return err + } + return json.Unmarshal(content, obj) +} + +// InterfaceToUnstructured converts the provided object to an +// unstructured by json marshalling/unmarshalling. +func InterfaceToUnstructured(obj interface{}) (ret interface{}, err error) { + var buf []byte + buf, err = json.Marshal(obj) + if err != nil { + return + } + + err = json.Unmarshal(buf, &ret) + return +} + +// ApplyJsonPatch applies the override on to the given unstructured object. +func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedtypesv1a1.OverridePatches) error { + // TODO: Do the defaulting of "op" field to "replace" in API defaulting + for i, overrideItem := range overrides { + if overrideItem.Op == "" { + overrides[i].Op = "replace" + } + } + jsonPatchBytes, err := json.Marshal(overrides) + if err != nil { + return err + } + + patch, err := jsonpatch.DecodePatch(jsonPatchBytes) + if err != nil { + return err + } + + ObjectJSONBytes, err := obj.MarshalJSON() + if err != nil { + return err + } + + patchedObjectJSONBytes, err := patch.Apply(ObjectJSONBytes) + if err != nil { + return err + } + + err = obj.UnmarshalJSON(patchedObjectJSONBytes) + return err +} diff --git a/pkg/controllers/util/propagatedversion.go b/pkg/controllers/util/propagatedversion.go index 77292917..4df3d4c6 100644 --- a/pkg/controllers/util/propagatedversion.go +++ b/pkg/controllers/util/propagatedversion.go @@ -20,138 +20,138 @@ are Copyright 2023 The KubeAdmiral Authors. package util -// import ( -// "fmt" -// "reflect" -// "sort" -// "strconv" -// "strings" - -// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - -// fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" -// utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" -// ) - -// const ( -// generationPrefix = "gen:" -// resourceVersionPrefix = "rv:" -// ) - -// // ObjectVersion retrieves the field type-prefixed value used for -// // determining currency of the given cluster object. -// func ObjectVersion(clusterObj *unstructured.Unstructured) string { -// generation := clusterObj.GetGeneration() -// if generation != 0 { -// return fmt.Sprintf("%s%d", generationPrefix, generation) -// } -// return fmt.Sprintf("%s%s", resourceVersionPrefix, clusterObj.GetResourceVersion()) -// } - -// // ObjectNeedsUpdate determines whether the 2 objects provided cluster -// // object needs to be updated according to the desired object and the -// // recorded version. 
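Back in overrides.go, ApplyJsonPatch rounds out the restored set: it defaults each patch's op to "replace" and applies the list through evanphx/json-patch. A short sketch of applying overrides to a member cluster object:

patches := fedtypesv1a1.OverridePatches{
	{Path: "/spec/replicas", Value: 2}, // Op omitted; defaulted to "replace"
}
if err := ApplyJsonPatch(clusterObj, patches); err != nil {
	return fmt.Errorf("failed to apply overrides: %w", err)
}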
-// func ObjectNeedsUpdate( -// desiredObj, clusterObj *unstructured.Unstructured, -// recordedVersion string, -// typeConfig *fedcorev1a1.FederatedTypeConfig, -// ) bool { -// targetVersion := ObjectVersion(clusterObj) - -// if recordedVersion != targetVersion { -// return true -// } - -// needUpdate := true -// if desiredReplicas, err := utilunstructured.GetInt64FromPath(desiredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { -// if currentReplicas, err := utilunstructured.GetInt64FromPath(clusterObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { -// if desiredReplicas == nil && currentReplicas == nil || -// desiredReplicas != nil && currentReplicas != nil && *desiredReplicas == *currentReplicas { -// needUpdate = false -// } -// } -// } -// if needUpdate { -// return true -// } - -// needUpdate = true -// if desiredMaxSurge, ok, err := unstructured.NestedString(desiredObj.Object, MaxSurgePathSlice...); err == nil { -// if currentMaxSurge, ok2, err := unstructured.NestedString(clusterObj.Object, MaxSurgePathSlice...); err == nil && -// ok == ok2 { -// if desiredMaxSurge == currentMaxSurge { -// needUpdate = false -// } -// } -// } else if desiredMaxSurge, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxSurgePathSlice...); err == nil { -// if currentMaxSurge, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxSurgePathSlice...); err == nil && ok == ok2 { -// if desiredMaxSurge == currentMaxSurge { -// needUpdate = false -// } -// } -// } -// if needUpdate { -// return true -// } - -// needUpdate = true -// if desiredMaxUnavailable, ok, err := unstructured.NestedString(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { -// if currentMaxUnavailable, ok2, err := unstructured.NestedString(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && -// ok == ok2 { -// if desiredMaxUnavailable == currentMaxUnavailable { -// needUpdate = false -// } -// } -// } else if desiredMaxUnavailable, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { -// if currentMaxUnavailable, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && ok == ok2 { -// if desiredMaxUnavailable == currentMaxUnavailable { -// needUpdate = false -// } -// } -// } -// if needUpdate { -// return true -// } -// // If versions match and the version is sourced from the -// // generation field, a further check of metadata equivalency is -// // required. -// return strings.HasPrefix(targetVersion, generationPrefix) && !ObjectMetaObjEquivalent(desiredObj, clusterObj) -// } - -// // SortClusterVersions ASCII sorts the given cluster versions slice -// // based on cluster name. -// func SortClusterVersions(versions []fedcorev1a1.ClusterObjectVersion) { -// sort.Slice(versions, func(i, j int) bool { -// return versions[i].ClusterName < versions[j].ClusterName -// }) -// } - -// // PropagatedVersionStatusEquivalent returns true if both statuses are equal by -// // comparing Template and Override version, and their ClusterVersion slices; -// // false otherwise. 
-// func PropagatedVersionStatusEquivalent(pvs1, pvs2 *fedcorev1a1.PropagatedVersionStatus) bool { -// return pvs1.TemplateVersion == pvs2.TemplateVersion && -// pvs1.OverrideVersion == pvs2.OverrideVersion && -// reflect.DeepEqual(pvs1.ClusterVersions, pvs2.ClusterVersions) -// } - -// func ConvertVersionMapToGenerationMap(versionMap map[string]string) map[string]int64 { -// generationMap := make(map[string]int64, len(versionMap)) -// for key, version := range versionMap { -// if strings.HasPrefix(version, resourceVersionPrefix) { -// generationMap[key] = 0 -// continue -// } -// if !strings.HasPrefix(version, generationPrefix) { -// continue -// } - -// generationString := strings.TrimPrefix(version, generationPrefix) -// generation, err := strconv.ParseInt(generationString, 10, 64) -// if err != nil { -// continue -// } -// generationMap[key] = generation -// } -// return generationMap -// } +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" +) + +const ( + generationPrefix = "gen:" + resourceVersionPrefix = "rv:" +) + +// ObjectVersion retrieves the field type-prefixed value used for +// determining currency of the given cluster object. +func ObjectVersion(clusterObj *unstructured.Unstructured) string { + generation := clusterObj.GetGeneration() + if generation != 0 { + return fmt.Sprintf("%s%d", generationPrefix, generation) + } + return fmt.Sprintf("%s%s", resourceVersionPrefix, clusterObj.GetResourceVersion()) +} + +// ObjectNeedsUpdate determines whether the 2 objects provided cluster +// object needs to be updated according to the desired object and the +// recorded version. 
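ObjectVersion above encodes which freshness signal is in play with a type prefix, which the version comparison below relies on. Concretely:

obj := &unstructured.Unstructured{Object: map[string]interface{}{}}
obj.SetGeneration(4)
_ = ObjectVersion(obj) // "gen:4"

obj.SetGeneration(0)
obj.SetResourceVersion("51234")
_ = ObjectVersion(obj) // "rv:51234"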
+func ObjectNeedsUpdate( + desiredObj, clusterObj *unstructured.Unstructured, + recordedVersion string, + typeConfig *fedcorev1a1.FederatedTypeConfig, +) bool { + targetVersion := ObjectVersion(clusterObj) + + if recordedVersion != targetVersion { + return true + } + + needUpdate := true + if desiredReplicas, err := utilunstructured.GetInt64FromPath(desiredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { + if currentReplicas, err := utilunstructured.GetInt64FromPath(clusterObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { + if desiredReplicas == nil && currentReplicas == nil || + desiredReplicas != nil && currentReplicas != nil && *desiredReplicas == *currentReplicas { + needUpdate = false + } + } + } + if needUpdate { + return true + } + + needUpdate = true + if desiredMaxSurge, ok, err := unstructured.NestedString(desiredObj.Object, MaxSurgePathSlice...); err == nil { + if currentMaxSurge, ok2, err := unstructured.NestedString(clusterObj.Object, MaxSurgePathSlice...); err == nil && + ok == ok2 { + if desiredMaxSurge == currentMaxSurge { + needUpdate = false + } + } + } else if desiredMaxSurge, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxSurgePathSlice...); err == nil { + if currentMaxSurge, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxSurgePathSlice...); err == nil && ok == ok2 { + if desiredMaxSurge == currentMaxSurge { + needUpdate = false + } + } + } + if needUpdate { + return true + } + + needUpdate = true + if desiredMaxUnavailable, ok, err := unstructured.NestedString(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { + if currentMaxUnavailable, ok2, err := unstructured.NestedString(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && + ok == ok2 { + if desiredMaxUnavailable == currentMaxUnavailable { + needUpdate = false + } + } + } else if desiredMaxUnavailable, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { + if currentMaxUnavailable, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && ok == ok2 { + if desiredMaxUnavailable == currentMaxUnavailable { + needUpdate = false + } + } + } + if needUpdate { + return true + } + // If versions match and the version is sourced from the + // generation field, a further check of metadata equivalency is + // required. + return strings.HasPrefix(targetVersion, generationPrefix) && !ObjectMetaObjEquivalent(desiredObj, clusterObj) +} + +// SortClusterVersions ASCII sorts the given cluster versions slice +// based on cluster name. +func SortClusterVersions(versions []fedcorev1a1.ClusterObjectVersion) { + sort.Slice(versions, func(i, j int) bool { + return versions[i].ClusterName < versions[j].ClusterName + }) +} + +// PropagatedVersionStatusEquivalent returns true if both statuses are equal by +// comparing Template and Override version, and their ClusterVersion slices; +// false otherwise. 
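The conversion helper restored just below is deliberately lossy about entries it cannot interpret. Illustratively:

in := map[string]string{
	"cluster-1": "gen:7",   // parsed generation -> 7
	"cluster-2": "rv:4096", // resourceVersions carry no generation -> 0
	"cluster-3": "bogus",   // unrecognized prefix -> dropped
}
out := ConvertVersionMapToGenerationMap(in)
// out == map[string]int64{"cluster-1": 7, "cluster-2": 0}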
+func PropagatedVersionStatusEquivalent(pvs1, pvs2 *fedcorev1a1.PropagatedVersionStatus) bool { + return pvs1.TemplateVersion == pvs2.TemplateVersion && + pvs1.OverrideVersion == pvs2.OverrideVersion && + reflect.DeepEqual(pvs1.ClusterVersions, pvs2.ClusterVersions) +} + +func ConvertVersionMapToGenerationMap(versionMap map[string]string) map[string]int64 { + generationMap := make(map[string]int64, len(versionMap)) + for key, version := range versionMap { + if strings.HasPrefix(version, resourceVersionPrefix) { + generationMap[key] = 0 + continue + } + if !strings.HasPrefix(version, generationPrefix) { + continue + } + + generationString := strings.TrimPrefix(version, generationPrefix) + generation, err := strconv.ParseInt(generationString, 10, 64) + if err != nil { + continue + } + generationMap[key] = generation + } + return generationMap +} diff --git a/pkg/controllers/util/rolloutplan.go b/pkg/controllers/util/rolloutplan.go index 7e96c589..9aababa1 100644 --- a/pkg/controllers/util/rolloutplan.go +++ b/pkg/controllers/util/rolloutplan.go @@ -16,852 +16,852 @@ limitations under the License. package util -// import ( -// "fmt" -// "sort" -// "strconv" -// "strings" - -// "github.com/pkg/errors" -// v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" -// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -// intstrutil "k8s.io/apimachinery/pkg/util/intstr" -// "k8s.io/klog/v2" - -// fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" -// "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -// utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" -// ) - -// const ( -// ReplicaPath = "/spec/replicas" -// MaxSurgePath = "/spec/strategy/rollingUpdate/maxSurge" -// MaxUnavailablePath = "/spec/strategy/rollingUpdate/maxUnavailable" -// Nil = "nil" -// ) - -// var ( -// MaxSurgePathSlice = []string{ -// common.SpecField, -// common.StrategyField, -// common.RollingUpdateField, -// common.MaxSurgeField, -// } -// MaxUnavailablePathSlice = []string{ -// common.SpecField, -// common.StrategyField, -// common.RollingUpdateField, -// common.MaxUnavailableField, -// } -// ) - -// type RolloutPlan struct { -// Replicas *int64 -// MaxSurge *int64 -// MaxUnavailable *int64 -// OnlyPatchReplicas bool -// } - -// func (p RolloutPlan) String() string { -// r, s, u := Nil, Nil, Nil -// if p.Replicas != nil { -// r = fmt.Sprintf("%d", *p.Replicas) -// } -// if p.MaxSurge != nil { -// s = fmt.Sprintf("%d", *p.MaxSurge) -// } -// if p.MaxUnavailable != nil { -// u = fmt.Sprintf("%d", *p.MaxUnavailable) -// } -// return fmt.Sprintf("%s,%s,%s,%t", r, s, u, p.OnlyPatchReplicas) -// } - -// func (p RolloutPlan) toOverrides() fedcorev1a1.OverridePatches { -// overrides := fedcorev1a1.OverridePatches{} -// if p.Replicas != nil { -// overrides = append(overrides, fedcorev1a1.OverridePatch{Path: ReplicaPath, Value: v1.JSON{Raw: []byte(*p.Replicas)}}) -// } -// if p.MaxSurge != nil { -// overrides = append(overrides, fedcorev1a1.OverridePatch{Path: MaxSurgePath, Value: *p.MaxSurge}) -// } -// if p.MaxUnavailable != nil { -// overrides = append(overrides, fedcorev1a1.OverridePatch{Path: MaxUnavailablePath, Value: *p.MaxUnavailable}) -// } -// return overrides -// } - -// func (p *RolloutPlan) correctFencepost(t *TargetInfo, defaultIsSurge bool) { -// completed := t.UpdateCompleted() -// isSurge := t.IsSurge() -// flip := t.Flip(defaultIsSurge) - -// if completed && !flip { -// // If the new replica set is saturated, set maxSurge & maxUnavailable to the 
final value. -// // If there are unavailable instances in the new replica set, they will be part of maxUnavailable -// p.MaxSurge = nil -// p.MaxUnavailable = nil -// } else if *p.MaxSurge == 0 && *p.MaxUnavailable == 0 { -// // Like deployment controller, we set one of them to one if both maxSurge & maxUnavailable is zero -// var one int32 = 1 -// if isSurge { -// p.MaxSurge = &one -// } else { -// p.MaxUnavailable = &one -// } -// } -// } - -// type RolloutPlans map[string]*RolloutPlan - -// func (r RolloutPlans) String() string { -// var strs []string -// for k, v := range r { -// strs = append(strs, fmt.Sprintf("%s:%v", k, v)) -// } -// return strings.Join(strs, "; ") -// } - -// func (r RolloutPlans) GetRolloutOverrides(clusterName string) fedcorev1a1.OverridePatches { -// p, ok := r[clusterName] -// if !ok { -// return fedcorev1a1.OverridePatches{} -// } -// return p.toOverrides() -// } - -// type Targets []*TargetInfo - -// func (s Targets) CurrentReplicas() int32 { -// var currentReplicas int32 -// for _, t := range s { -// currentReplicas += t.Status.Replicas -// } -// return currentReplicas -// } - -// func (s Targets) DesiredReplicas() int32 { -// var desiredReplicas int32 -// for _, t := range s { -// desiredReplicas += t.DesiredReplicas -// } -// return desiredReplicas -// } - -// func (s Targets) AvailableReplicas() int32 { -// var totalAvailable int32 -// for _, t := range s { -// totalAvailable += t.Status.AvailableReplicas -// } -// return totalAvailable -// } - -// func (s Targets) ActualReplicas() int32 { -// var totalActual int32 -// for _, t := range s { -// totalActual += t.Status.ActualReplicas -// } -// return totalActual -// } - -// type TargetStatus struct { -// Replicas int32 // dp.Spec.Replicas -// ActualReplicas int32 // dp.Status.Replicas -// AvailableReplicas int32 // dp.Status.AvailableReplicas -// UpdatedReplicas int32 // latestreplicaset.kubeadmiral.io/replicas if it's up-to-date, else 0 -// UpdatedAvailableReplicas int32 // latestreplicaset.kubeadmiral.io/available-replicas if it's up-to-date, else 0 -// CurrentNewReplicas int32 // the replicas of new replicaset which belong to current deployment -// CurrentNewAvailableReplicas int32 // the available replicas of new replicaset which belong to current deployment -// Updated bool // whether pod template is up to date in current dp with which in fedDp -// MaxSurge int32 // maxSurge in current dp -// MaxUnavailable int32 // maxUnavailable in current dp -// } - -// type TargetInfo struct { -// ClusterName string -// Status TargetStatus -// DesiredReplicas int32 -// } - -// func (t *TargetInfo) String() string { -// return fmt.Sprintf("%s:%d->%d,%d/%d,%d/%d,%d/%d,%d,%d,%t", t.ClusterName, t.Status.Replicas, t.DesiredReplicas, -// t.Status.UpdatedAvailableReplicas, t.Status.UpdatedReplicas, -// t.Status.CurrentNewAvailableReplicas, t.Status.CurrentNewReplicas, -// t.Status.AvailableReplicas, t.Status.ActualReplicas, -// t.Status.MaxSurge, t.Status.MaxUnavailable, t.Status.Updated) -// } - -// func (t *TargetInfo) MaxSurge(maxSurge, leastSurge int32) (int32, int32) { -// res := Int32Min(maxSurge+leastSurge, t.ReplicasToUpdate()) -// if res < 0 { -// res = 0 -// } -// more := res - leastSurge -// // impossible in normal cases -// // normalize to zero to get a more strict plan, try the best to correct the unexpected situation -// if more < 0 { -// more = 0 -// } -// if maxSurge < 0 && leastSurge > t.Status.MaxSurge && res > t.Status.MaxSurge { -// res = t.Status.MaxSurge -// } -// return res, more -// } - -// func 
(t *TargetInfo) MaxUnavailable(maxUnavailable, leastUnavailable int32) (int32, int32) { -// res := Int32Min(maxUnavailable+leastUnavailable, t.ReplicasToUpdatedAvailable()) -// if res < 0 { -// res = 0 -// } -// more := res - leastUnavailable -// // impossible in normal cases -// // normalize to zero to get a more strict plan, try the best to correct the unexpected situation -// if more < 0 { -// more = 0 -// } -// if maxUnavailable < 0 && leastUnavailable > t.Status.MaxUnavailable && res > t.Status.MaxUnavailable { -// res = t.Status.MaxUnavailable -// } -// return res, more -// } - -// func (t *TargetInfo) MaxScaleOut(maxScaleOut, leastSurge int32) (int32, int32) { -// res := Int32Min(maxScaleOut+leastSurge, t.DesiredReplicas-t.Status.Replicas) -// if res < 0 { -// res = 0 -// } -// more := res - leastSurge -// if more < 0 { -// more = 0 -// } -// return res, more -// } - -// func (t *TargetInfo) MaxScaleIn(maxScaleIn, leastUnavailable int32) (int32, int32) { -// res := Int32Min(maxScaleIn+leastUnavailable, t.Status.Replicas-t.DesiredReplicas) -// // impossible -// if res > t.Status.Replicas { -// res = t.Status.Replicas -// } -// if res < 0 { -// res = 0 -// } -// more := res - leastUnavailable -// if more < 0 { -// more = 0 -// } -// return res, more -// } - -// func (t *TargetInfo) LeastSurge() int32 { -// res := t.Status.ActualReplicas - t.Status.Replicas -// if res < 0 { -// res = 0 -// } -// if !t.DuringUpdating() { -// return res -// } -// return Int32Max(res, Int32Min(t.Status.MaxSurge, res+t.ReplicasToUpdateCurrently())) -// } - -// func (t *TargetInfo) LeastUnavailable() int32 { -// res := t.Status.Replicas - t.Status.AvailableReplicas -// if res < 0 { -// res = 0 -// } -// if !t.DuringUpdating() { -// return res -// } -// return Int32Max(res, Int32Min(t.Status.MaxUnavailable, t.ReplicasToUpdatedAvailableCurrently())) -// } - -// func (t *TargetInfo) ReplicasToUpdate() int32 { -// res := t.Status.Replicas - t.Status.UpdatedReplicas -// if res < 0 { -// res = 0 -// } -// return res -// } - -// func (t *TargetInfo) ReplicasToUpdatedAvailable() int32 { -// res := t.Status.Replicas - t.Status.UpdatedAvailableReplicas -// if res < 0 { -// res = 0 -// } -// return res -// } - -// func (t *TargetInfo) ReplicasToUpdateCurrently() int32 { -// res := t.Status.Replicas - t.Status.CurrentNewReplicas -// if res < 0 { -// res = 0 -// } -// return res -// } - -// func (t *TargetInfo) ReplicasToUpdatedAvailableCurrently() int32 { -// res := t.Status.Replicas - t.Status.CurrentNewAvailableReplicas -// if res < 0 { -// res = 0 -// } -// return res -// } - -// func (t *TargetInfo) DuringUpdating() bool { -// // todo: only return t.Status.CurrentNewReplicas < t.Status.Replicas after we get the real currentNewReplicas -// if t.Status.CurrentNewReplicas < t.Status.Replicas { -// return true -// } -// if t.Status.Updated && t.ReplicasToUpdate() > 0 { -// return true -// } -// return false -// } - -// func (t *TargetInfo) UpdateCompleted() bool { -// return t.ReplicasToUpdate() == 0 -// } - -// func (t *TargetInfo) IsSurge() bool { -// return t.Status.MaxSurge != 0 && t.Status.MaxUnavailable == 0 -// } - -// func (t *TargetInfo) Flip(defaultIsSurge bool) bool { -// // a temporary fix to avoid unexpected flipping -// // todo: avoiding this nasty judgment by restricting the replicas changes to be used only for scaling -// return t.IsSurge() && !defaultIsSurge && t.ReplicasToUpdatedAvailable() > 0 -// } - -// func (t *TargetInfo) SkipPlanForUpdate(maxSurge, maxUnavailable int32) bool { -// return 
maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() && t.LeastSurge() <= 0 && -// t.LeastUnavailable() <= 0 -// } - -// func (t *TargetInfo) SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable int32) bool { -// if maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() { -// if leastUnavailable > 0 { -// return false -// } -// leastSurge := t.LeastSurge() -// if t.DesiredReplicas < t.Status.Replicas { -// leastSurge = 0 -// } -// if leastSurge > 0 { -// return false -// } -// return true -// } -// return false -// } - -// func (t *TargetInfo) SkipPlanForScaleIn(maxUnavailable int32) bool { -// return maxUnavailable <= 0 && t.LeastUnavailable() <= 0 -// } - -// func (t *TargetInfo) SkipPlanForScaleOut(maxSurge int32) bool { -// return maxSurge <= 0 && t.LeastSurge() <= 0 -// } - -// func unstructuredObjToTargetInfo(clusterName string, unstructuredObj *unstructured.Unstructured, desiredReplicas int32, -// desiredRevision string, typeConfig *fedcorev1a1.FederatedTypeConfig, -// ) (*TargetInfo, error) { -// if unstructuredObj == nil { -// return &TargetInfo{ -// ClusterName: clusterName, -// DesiredReplicas: desiredReplicas, -// }, nil -// } - -// replicas, err := utilunstructured.GetInt64FromPath(unstructuredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) -// if err != nil || replicas == nil { -// return nil, errors.Errorf("failed to retrieve replicas, err: %v", err) -// } -// maxSurge, maxUnavailable, err := RetrieveFencepost( -// unstructuredObj, -// MaxSurgePathSlice, -// MaxUnavailablePathSlice, -// int32(*replicas), -// ) -// if err != nil { -// return nil, errors.Wrap(err, "failed to retrieve fencepost") -// } -// revision, ok := unstructuredObj.GetAnnotations()[common.CurrentRevisionAnnotation] -// if !ok { -// return nil, errors.Errorf("failed to retrieve annotation %s", common.CurrentRevisionAnnotation) -// } -// // consider it has been updated as long as the template is updated. 
We don't wait for the refresh of -// // latestreplicaset annotations since the latency due to asynchronous updates may bring some problems -// updated := revision == desiredRevision -// currentNewReplicas, currentNewAvailableReplicas, err := retrieveNewReplicaSetInfo(unstructuredObj) -// if err != nil { -// return nil, errors.Wrap(err, "failed to retrieve new replicaSet info") -// } - -// updatedReplicas, updatedAvailableReplicas := currentNewReplicas, currentNewAvailableReplicas -// if !updated { -// updatedReplicas, updatedAvailableReplicas = 0, 0 -// } - -// actualReplicasOption, err := utilunstructured.GetInt64FromPath( -// unstructuredObj, -// typeConfig.Spec.PathDefinition.ReplicasSpec, -// nil, -// ) -// if err != nil { -// return nil, errors.Wrap(err, "failed to retrieve actual replicas") -// } -// var actualReplicas int32 -// if actualReplicasOption != nil { -// actualReplicas = int32(*actualReplicasOption) -// } - -// availableReplicasOption, err := utilunstructured.GetInt64FromPath( -// unstructuredObj, -// typeConfig.Spec.PathDefinition.AvailableReplicasStatus, -// nil, -// ) -// if err != nil { -// return nil, errors.Wrap(err, "failed to retrieve actual available replicas") -// } -// var availableReplicas int32 -// if availableReplicasOption != nil { -// availableReplicas = int32(*availableReplicasOption) -// } - -// t := &TargetInfo{ -// ClusterName: clusterName, -// Status: TargetStatus{ -// Replicas: int32(*replicas), -// ActualReplicas: actualReplicas, -// AvailableReplicas: availableReplicas, -// UpdatedReplicas: updatedReplicas, -// UpdatedAvailableReplicas: updatedAvailableReplicas, -// CurrentNewReplicas: currentNewReplicas, -// CurrentNewAvailableReplicas: currentNewAvailableReplicas, -// Updated: updated, -// MaxSurge: maxSurge, -// MaxUnavailable: maxUnavailable, -// }, -// DesiredReplicas: desiredReplicas, -// } -// return t, nil -// } - -// type RolloutPlanner struct { -// typeConfig *fedcorev1a1.FederatedTypeConfig -// Key string -// Targets Targets -// MaxSurge int32 -// MaxUnavailable int32 -// Replicas int32 -// Revision string -// } - -// func NewRolloutPlanner( -// key string, -// typeConfig *fedcorev1a1.FederatedTypeConfig, -// federatedResource *unstructured.Unstructured, -// replicas int32, -// ) (*RolloutPlanner, error) { -// pathPrefix := []string{common.SpecField, common.TemplateField} -// maxSurgePath := append(pathPrefix, MaxSurgePathSlice...) -// maxUnavailablePath := append(pathPrefix, MaxUnavailablePathSlice...) 
-// maxSurge, maxUnavailable, err := RetrieveFencepost(federatedResource, maxSurgePath, maxUnavailablePath, replicas) -// if err != nil { -// return nil, errors.Wrap(err, "failed to retrieve maxSurge or maxUnavailable from federated resource") -// } -// desiredRevision, ok := federatedResource.GetAnnotations()[common.CurrentRevisionAnnotation] -// if !ok { -// return nil, errors.Errorf( -// "failed to retrieve annotation %s from federated resource", -// common.CurrentRevisionAnnotation, -// ) -// } -// return &RolloutPlanner{ -// typeConfig: typeConfig, -// Key: key, -// MaxSurge: maxSurge, -// MaxUnavailable: maxUnavailable, -// Replicas: replicas, -// Revision: desiredRevision, -// }, nil -// } - -// func (p *RolloutPlanner) RegisterTarget( -// clusterName string, -// targetObj *unstructured.Unstructured, -// desiredReplicas int32, -// ) error { -// t, err := unstructuredObjToTargetInfo(clusterName, targetObj, desiredReplicas, p.Revision, p.typeConfig) -// if err != nil { -// return err -// } -// p.Targets = append(p.Targets, t) -// return nil -// } - -// func (p *RolloutPlanner) IsScalingEvent() bool { -// _, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets) -// // create / scale out / scale in -// if len(targetsToScaleOut) != 0 && len(targetsToScaleIn) != 0 { -// return false -// } -// if len(targetsToScaleOut) == 0 && len(targetsToScaleIn) == 0 { -// return false -// } -// for _, t := range p.Targets { -// if !t.UpdateCompleted() { -// return false -// } -// if t.Flip(p.IsSurge()) { -// return false -// } -// } -// return true -// } - -// func (p *RolloutPlanner) PlanScale() RolloutPlans { -// plans := make(map[string]*RolloutPlan) -// for _, t := range p.Targets { -// plans[t.ClusterName] = &RolloutPlan{} -// } -// return plans -// } - -// func (p *RolloutPlanner) String() string { -// var ts []string -// for _, t := range p.Targets { -// ts = append(ts, fmt.Sprintf("%v", t)) -// } -// return fmt.Sprintf("%s[%d,%d,%d,%s]: %v", -// p.Key, p.Replicas, p.MaxSurge, p.MaxUnavailable, p.Revision, strings.Join(ts, "; ")) -// } - -// func (p *RolloutPlanner) RemainingMaxSurge() int32 { -// // maxSurge := p.Replicas + p.MaxSurge - p.Targets.ActualReplicas() -// // maxSurge := p.MaxSurge - (p.Targets.ActualReplicas() - p.Replicas) -// var replicas, occupied int32 -// for _, t := range p.Targets { -// replicas += t.Status.Replicas -// occupied += t.LeastSurge() -// } -// return p.MaxSurge - (replicas - p.Replicas) - occupied -// } - -// func (p *RolloutPlanner) RemainingMaxUnavailable() int32 { -// // maxUnavailable := p.Targets.AvailableReplicas() - (p.Replicas - p.MaxUnavailable) -// // maxUnavailable := p.MaxUnavailable - (p.Replicas - p.Targets.AvailableReplicas()) -// var replicas, occupied int32 -// for _, t := range p.Targets { -// replicas += t.Status.Replicas -// occupied += t.LeastUnavailable() -// } -// return p.MaxUnavailable - (p.Replicas - replicas) - occupied -// } - -// func (p *RolloutPlanner) IsSurge() bool { -// return p.MaxSurge != 0 && p.MaxUnavailable == 0 -// } - -// // ComputeRolloutPlans compute maxUnavailable, maxSurge, replicas during rollout process. It returns a map that -// // contains all the targets which are planned according to current status. Nil in a plan means the corresponding field -// // won't be overridden by the rollout plan and should be set with the original value. If there's no plan for a target, -// // it means "don't rollout it, it should wait for it's turn". 
-// func (p *RolloutPlanner) Plan() RolloutPlans { -// targetsToUpdate, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets) -// plans := make(map[string]*RolloutPlan) - -// if p.IsScalingEvent() { -// return p.PlanScale() -// } - -// // the remaining maxSurge & maxUnavailable that can be dispatched to deployments. If there are clusters that are -// // not ready, or that we failed to get deployment from, the maxSurge/maxUnavailble will be increased/decreased -// maxSurge, maxUnavailable := p.RemainingMaxSurge(), p.RemainingMaxUnavailable() - -// // execution sequence (try to upgrade before scale out and scale in before upgrade): -// // 1. upgrade targets waiting to be scaled out -// // 2. scale in targets waiting to be scaled in -// // 3. upgrade targets that only need to be upgraded -// // 4. scale out targets waiting to be scaled out -// // 5. upgrade targets waiting to be scaled in -// for _, t := range targetsToScaleOut { -// if t.SkipPlanForUpdate(maxSurge, maxUnavailable) { -// continue -// } -// s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) -// u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable()) -// maxSurge -= sm -// maxUnavailable -= um -// r := t.Status.Replicas -// plan := &RolloutPlan{Replicas: &r} -// plan.MaxSurge = &s -// plan.MaxUnavailable = &u -// plan.correctFencepost(t, p.IsSurge()) -// plans[t.ClusterName] = plan -// } - -// for _, t := range targetsToScaleIn { -// if t.SkipPlanForScaleIn(maxUnavailable) { -// continue -// } -// // we tend to scale in those that are already unavailable -// leastUnavailable := t.LeastUnavailable() -// if t.DuringUpdating() { -// // if it' during updating (for example, the maxUnavailable is enough for scale in and updating coming next, -// // so we set the replica and maxUnavailable; but a fed weight adjusting followed so we have to scale in again -// // even though it's being updated), scaling will be performed proportionally and may not cover the -// // unavailable instances as expected. 
-// leastUnavailable = 0 -// } -// scale, more := t.MaxScaleIn(maxUnavailable, leastUnavailable) -// maxUnavailable -= more -// plan := &RolloutPlan{OnlyPatchReplicas: true} -// r := t.Status.Replicas - scale -// plan.Replicas = &r -// plans[t.ClusterName] = plan -// } - -// for _, t := range targetsToUpdate { -// if t.SkipPlanForUpdate(maxSurge, maxUnavailable) { -// continue -// } -// s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) -// u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable()) -// maxSurge -= sm -// maxUnavailable -= um -// plan := &RolloutPlan{} -// plan.MaxSurge = &s -// plan.MaxUnavailable = &u -// plan.correctFencepost(t, p.IsSurge()) -// plans[t.ClusterName] = plan -// } - -// for _, t := range targetsToScaleOut { -// if t.SkipPlanForScaleOut(maxSurge) { -// continue -// } -// // make sure new rs exists to avoid too much unnecessary work -// if !t.Status.Updated && t.Status.Replicas != 0 { -// continue -// } -// leastSurge := t.LeastSurge() -// if t.DuringUpdating() { -// leastSurge = 0 -// } -// scale, more := t.MaxScaleOut(maxSurge, leastSurge) -// maxSurge -= more -// plan, ok := plans[t.ClusterName] -// if !ok || plan == nil { -// plan = &RolloutPlan{} -// } -// r := t.Status.Replicas + scale -// plan.Replicas = &r -// plans[t.ClusterName] = plan -// } - -// for _, t := range targetsToScaleIn { -// plan, ok := plans[t.ClusterName] -// if !ok || plan == nil { -// r := t.Status.Replicas -// plan = &RolloutPlan{Replicas: &r} -// } -// // we have already scale in some unavailable instances in the second step, exclude them -// leastUnavailable := t.LeastUnavailable() -// if !t.DuringUpdating() { -// leastUnavailable -= t.Status.Replicas - *plan.Replicas -// if leastUnavailable < 0 { -// leastUnavailable = 0 -// } -// } -// if t.SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable) { -// continue -// } - -// plan.OnlyPatchReplicas = false -// s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) -// u, um := t.MaxUnavailable(maxUnavailable, leastUnavailable) -// maxSurge -= sm -// maxUnavailable -= um -// plan.MaxSurge = &s -// plan.MaxUnavailable = &u -// plan.correctFencepost(t, p.IsSurge()) -// plans[t.ClusterName] = plan -// } -// if err := validatePlans(p, plans); err != nil { -// klog.Errorf("Failed to generate rollout plan for %s: %v. 
Current status: %s", p.Key, err, p) -// return RolloutPlans{} -// } -// return plans -// } - -// func sortTargets(targets []*TargetInfo) ([]*TargetInfo, []*TargetInfo, []*TargetInfo) { -// // sort the list to first update the targets that are already in update process -// sort.Slice(targets, func(i, j int) bool { -// return targets[i].ClusterName < targets[j].ClusterName -// }) -// var toUpdate, toScaleOut, toScaleIn []*TargetInfo -// for _, t := range targets { -// change := t.DesiredReplicas - t.Status.Replicas -// switch { -// case change < 0: -// toScaleIn = append(toScaleIn, t) -// case change > 0: -// toScaleOut = append(toScaleOut, t) -// default: -// toUpdate = append(toUpdate, t) -// } -// } -// return toUpdate, toScaleOut, toScaleIn -// } - -// func Int32Min(a, b int32) int32 { -// if b < a { -// return b -// } -// return a -// } - -// func Int32Max(a, b int32) int32 { -// if b > a { -// return b -// } -// return a -// } - -// func resolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { -// surge, err := intstrutil.GetValueFromIntOrPercent( -// intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), -// int(desired), -// true, -// ) -// if err != nil { -// return 0, 0, err -// } -// unavailable, err := intstrutil.GetValueFromIntOrPercent( -// intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), -// int(desired), -// false, -// ) -// if err != nil { -// return 0, 0, err -// } - -// if surge == 0 && unavailable == 0 { -// // Validation should never allow the user to explicitly use zero values for both maxSurge -// // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. -// // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the -// // theory that surge might not work due to quota. 
-// unavailable = 1 -// } - -// return int32(surge), int32(unavailable), nil -// } - -// func RetrieveFencepost(unstructuredObj *unstructured.Unstructured, maxSurgePath []string, maxUnavailablePath []string, -// replicas int32, -// ) (int32, int32, error) { -// var maxSurge, maxUnavailable *intstrutil.IntOrString -// if ms, ok, err := unstructured.NestedString(unstructuredObj.Object, maxSurgePath...); ok && err == nil { -// maxSurge = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: ms} -// } else { -// if ms, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxSurgePath...); ok && err2 == nil { -// maxSurge = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(ms)} -// } else { -// klog.V(4).Infof("Failed to retrieve maxSurge from %s/%s: %v, %v", -// unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2) -// } -// } -// if mu, ok, err := unstructured.NestedString(unstructuredObj.Object, maxUnavailablePath...); ok && err == nil { -// maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: mu} -// } else { -// if mu, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxUnavailablePath...); ok && err2 == nil { -// maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(mu)} -// } else { -// klog.V(4).Infof("Failed to retrieve maxUnavailable from %s/%s: %v, %v", -// unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2) -// } -// } - -// ms, mu, err := resolveFenceposts(maxSurge, maxUnavailable, replicas) -// if err != nil { -// return 0, 0, err -// } -// if ms < 0 { -// ms = 0 -// } -// if mu < 0 { -// mu = 0 -// } -// return ms, mu, nil -// } - -// func retrieveNewReplicaSetInfo(unstructuredObj *unstructured.Unstructured) (int32, int32, error) { -// ann, ok := unstructuredObj.GetAnnotations()[LatestReplicasetReplicasAnnotation] -// if !ok || ann == "" { -// return 0, 0, errors.Errorf("missing annotation %s", LatestReplicasetReplicasAnnotation) -// } -// replicas, err := strconv.ParseInt(ann, 10, 32) -// if err != nil { -// return 0, 0, err -// } -// ann, ok = unstructuredObj.GetAnnotations()[LatestReplicasetAvailableReplicasAnnotation] -// if !ok || ann == "" { -// return 0, 0, errors.Errorf( -// "missing annotation %s", LatestReplicasetAvailableReplicasAnnotation) -// } -// availableReplicas, err := strconv.ParseInt(ann, 10, 32) -// if err != nil { -// return 0, 0, err -// } -// // todo: make sure the latestreplicaset annotations describe the current pod template of deployment -// // a simple way to tell if the latestreplicaset annotations is up to date with current deployment. 
-// lastRsName, lastRsNameExists := unstructuredObj.GetAnnotations()[common.LastReplicasetName] -// rsName, rsNameExists := unstructuredObj.GetAnnotations()[LatestReplicasetNameAnnotation] -// if !rsNameExists { -// return 0, 0, errors.Errorf("missing annotation %s", LatestReplicasetNameAnnotation) -// } -// rsNameOutdated := lastRsNameExists && rsNameExists && lastRsName == rsName -// if rsNameOutdated { -// // paused=true may also result in this situation -// replicas, availableReplicas = 0, 0 -// } -// return int32(replicas), int32(availableReplicas), nil -// } - -// func validatePlans(p *RolloutPlanner, plans RolloutPlans) error { -// var planned, desired, current, maxUnavailable int32 -// for _, t := range p.Targets { -// desired += t.DesiredReplicas -// cluster := t.ClusterName -// r := t.Status.Replicas -// current += r -// if p, ok := plans[cluster]; ok { -// if p == nil { -// return errors.Errorf("invalid plan for %s: %v", cluster, p) -// } -// if p.Replicas != nil { -// r = *p.Replicas -// } else { -// r = t.DesiredReplicas -// } -// if p.MaxUnavailable != nil { -// if p.MaxSurge == nil || *p.MaxSurge != 0 || *p.MaxUnavailable != 1 { -// maxUnavailable += *p.MaxUnavailable -// } -// } -// } -// planned += r -// } -// if p.Replicas-desired > p.MaxUnavailable { -// return errors.Errorf("desired replicas deviates too much from the initial replicas, maybe some " + -// "clusters are not ready") -// } -// l, h := desired, current -// if desired > current { -// l, h = current, desired -// } -// if l-planned > p.MaxUnavailable || planned-h > p.MaxSurge { -// return errors.Errorf("invalid plan: %v", plans) -// } -// return nil -// } +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/pkg/errors" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" +) + +const ( + ReplicaPath = "/spec/replicas" + MaxSurgePath = "/spec/strategy/rollingUpdate/maxSurge" + MaxUnavailablePath = "/spec/strategy/rollingUpdate/maxUnavailable" + Nil = "nil" +) + +var ( + MaxSurgePathSlice = []string{ + common.SpecField, + common.StrategyField, + common.RollingUpdateField, + common.MaxSurgeField, + } + MaxUnavailablePathSlice = []string{ + common.SpecField, + common.StrategyField, + common.RollingUpdateField, + common.MaxUnavailableField, + } +) + +type RolloutPlan struct { + Replicas *int64 + MaxSurge *int64 + MaxUnavailable *int64 + OnlyPatchReplicas bool +} + +func (p RolloutPlan) String() string { + r, s, u := Nil, Nil, Nil + if p.Replicas != nil { + r = fmt.Sprintf("%d", *p.Replicas) + } + if p.MaxSurge != nil { + s = fmt.Sprintf("%d", *p.MaxSurge) + } + if p.MaxUnavailable != nil { + u = fmt.Sprintf("%d", *p.MaxUnavailable) + } + return fmt.Sprintf("%s,%s,%s,%t", r, s, u, p.OnlyPatchReplicas) +} + +func (p RolloutPlan) toOverrides() fedcorev1a1.OverridePatches { + overrides := fedcorev1a1.OverridePatches{} + if p.Replicas != nil { + overrides = append(overrides, fedcorev1a1.OverridePatch{Path: ReplicaPath, Value: v1.JSON{Raw: []byte(*p.Replicas)}}) + } + if p.MaxSurge != nil { + overrides = append(overrides, fedcorev1a1.OverridePatch{Path: MaxSurgePath, Value: *p.MaxSurge}) + } + if p.MaxUnavailable != nil { + overrides = append(overrides, 
fedcorev1a1.OverridePatch{Path: MaxUnavailablePath, Value: *p.MaxUnavailable}) + } + return overrides +} + +func (p *RolloutPlan) correctFencepost(t *TargetInfo, defaultIsSurge bool) { + completed := t.UpdateCompleted() + isSurge := t.IsSurge() + flip := t.Flip(defaultIsSurge) + + if completed && !flip { + // If the new replica set is saturated, set maxSurge & maxUnavailable to the final value. + // If there are unavailable instances in the new replica set, they will be part of maxUnavailable + p.MaxSurge = nil + p.MaxUnavailable = nil + } else if *p.MaxSurge == 0 && *p.MaxUnavailable == 0 { + // Like deployment controller, we set one of them to one if both maxSurge & maxUnavailable is zero + var one int32 = 1 + if isSurge { + p.MaxSurge = &one + } else { + p.MaxUnavailable = &one + } + } +} + +type RolloutPlans map[string]*RolloutPlan + +func (r RolloutPlans) String() string { + var strs []string + for k, v := range r { + strs = append(strs, fmt.Sprintf("%s:%v", k, v)) + } + return strings.Join(strs, "; ") +} + +func (r RolloutPlans) GetRolloutOverrides(clusterName string) fedcorev1a1.OverridePatches { + p, ok := r[clusterName] + if !ok { + return fedcorev1a1.OverridePatches{} + } + return p.toOverrides() +} + +type Targets []*TargetInfo + +func (s Targets) CurrentReplicas() int32 { + var currentReplicas int32 + for _, t := range s { + currentReplicas += t.Status.Replicas + } + return currentReplicas +} + +func (s Targets) DesiredReplicas() int32 { + var desiredReplicas int32 + for _, t := range s { + desiredReplicas += t.DesiredReplicas + } + return desiredReplicas +} + +func (s Targets) AvailableReplicas() int32 { + var totalAvailable int32 + for _, t := range s { + totalAvailable += t.Status.AvailableReplicas + } + return totalAvailable +} + +func (s Targets) ActualReplicas() int32 { + var totalActual int32 + for _, t := range s { + totalActual += t.Status.ActualReplicas + } + return totalActual +} + +type TargetStatus struct { + Replicas int32 // dp.Spec.Replicas + ActualReplicas int32 // dp.Status.Replicas + AvailableReplicas int32 // dp.Status.AvailableReplicas + UpdatedReplicas int32 // latestreplicaset.kubeadmiral.io/replicas if it's up-to-date, else 0 + UpdatedAvailableReplicas int32 // latestreplicaset.kubeadmiral.io/available-replicas if it's up-to-date, else 0 + CurrentNewReplicas int32 // the replicas of new replicaset which belong to current deployment + CurrentNewAvailableReplicas int32 // the available replicas of new replicaset which belong to current deployment + Updated bool // whether pod template is up to date in current dp with which in fedDp + MaxSurge int32 // maxSurge in current dp + MaxUnavailable int32 // maxUnavailable in current dp +} + +type TargetInfo struct { + ClusterName string + Status TargetStatus + DesiredReplicas int32 +} + +func (t *TargetInfo) String() string { + return fmt.Sprintf("%s:%d->%d,%d/%d,%d/%d,%d/%d,%d,%d,%t", t.ClusterName, t.Status.Replicas, t.DesiredReplicas, + t.Status.UpdatedAvailableReplicas, t.Status.UpdatedReplicas, + t.Status.CurrentNewAvailableReplicas, t.Status.CurrentNewReplicas, + t.Status.AvailableReplicas, t.Status.ActualReplicas, + t.Status.MaxSurge, t.Status.MaxUnavailable, t.Status.Updated) +} + +func (t *TargetInfo) MaxSurge(maxSurge, leastSurge int32) (int32, int32) { + res := Int32Min(maxSurge+leastSurge, t.ReplicasToUpdate()) + if res < 0 { + res = 0 + } + more := res - leastSurge + // impossible in normal cases + // normalize to zero to get a more strict plan, try the best to correct the unexpected situation + if 
more < 0 { + more = 0 + } + if maxSurge < 0 && leastSurge > t.Status.MaxSurge && res > t.Status.MaxSurge { + res = t.Status.MaxSurge + } + return res, more +} + +func (t *TargetInfo) MaxUnavailable(maxUnavailable, leastUnavailable int32) (int32, int32) { + res := Int32Min(maxUnavailable+leastUnavailable, t.ReplicasToUpdatedAvailable()) + if res < 0 { + res = 0 + } + more := res - leastUnavailable + // impossible in normal cases + // normalize to zero to get a more strict plan, try the best to correct the unexpected situation + if more < 0 { + more = 0 + } + if maxUnavailable < 0 && leastUnavailable > t.Status.MaxUnavailable && res > t.Status.MaxUnavailable { + res = t.Status.MaxUnavailable + } + return res, more +} + +func (t *TargetInfo) MaxScaleOut(maxScaleOut, leastSurge int32) (int32, int32) { + res := Int32Min(maxScaleOut+leastSurge, t.DesiredReplicas-t.Status.Replicas) + if res < 0 { + res = 0 + } + more := res - leastSurge + if more < 0 { + more = 0 + } + return res, more +} + +func (t *TargetInfo) MaxScaleIn(maxScaleIn, leastUnavailable int32) (int32, int32) { + res := Int32Min(maxScaleIn+leastUnavailable, t.Status.Replicas-t.DesiredReplicas) + // impossible + if res > t.Status.Replicas { + res = t.Status.Replicas + } + if res < 0 { + res = 0 + } + more := res - leastUnavailable + if more < 0 { + more = 0 + } + return res, more +} + +func (t *TargetInfo) LeastSurge() int32 { + res := t.Status.ActualReplicas - t.Status.Replicas + if res < 0 { + res = 0 + } + if !t.DuringUpdating() { + return res + } + return Int32Max(res, Int32Min(t.Status.MaxSurge, res+t.ReplicasToUpdateCurrently())) +} + +func (t *TargetInfo) LeastUnavailable() int32 { + res := t.Status.Replicas - t.Status.AvailableReplicas + if res < 0 { + res = 0 + } + if !t.DuringUpdating() { + return res + } + return Int32Max(res, Int32Min(t.Status.MaxUnavailable, t.ReplicasToUpdatedAvailableCurrently())) +} + +func (t *TargetInfo) ReplicasToUpdate() int32 { + res := t.Status.Replicas - t.Status.UpdatedReplicas + if res < 0 { + res = 0 + } + return res +} + +func (t *TargetInfo) ReplicasToUpdatedAvailable() int32 { + res := t.Status.Replicas - t.Status.UpdatedAvailableReplicas + if res < 0 { + res = 0 + } + return res +} + +func (t *TargetInfo) ReplicasToUpdateCurrently() int32 { + res := t.Status.Replicas - t.Status.CurrentNewReplicas + if res < 0 { + res = 0 + } + return res +} + +func (t *TargetInfo) ReplicasToUpdatedAvailableCurrently() int32 { + res := t.Status.Replicas - t.Status.CurrentNewAvailableReplicas + if res < 0 { + res = 0 + } + return res +} + +func (t *TargetInfo) DuringUpdating() bool { + // todo: only return t.Status.CurrentNewReplicas < t.Status.Replicas after we get the real currentNewReplicas + if t.Status.CurrentNewReplicas < t.Status.Replicas { + return true + } + if t.Status.Updated && t.ReplicasToUpdate() > 0 { + return true + } + return false +} + +func (t *TargetInfo) UpdateCompleted() bool { + return t.ReplicasToUpdate() == 0 +} + +func (t *TargetInfo) IsSurge() bool { + return t.Status.MaxSurge != 0 && t.Status.MaxUnavailable == 0 +} + +func (t *TargetInfo) Flip(defaultIsSurge bool) bool { + // a temporary fix to avoid unexpected flipping + // todo: avoiding this nasty judgment by restricting the replicas changes to be used only for scaling + return t.IsSurge() && !defaultIsSurge && t.ReplicasToUpdatedAvailable() > 0 +} + +func (t *TargetInfo) SkipPlanForUpdate(maxSurge, maxUnavailable int32) bool { + return maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() && 
t.LeastSurge() <= 0 && + t.LeastUnavailable() <= 0 +} + +func (t *TargetInfo) SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable int32) bool { + if maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() { + if leastUnavailable > 0 { + return false + } + leastSurge := t.LeastSurge() + if t.DesiredReplicas < t.Status.Replicas { + leastSurge = 0 + } + if leastSurge > 0 { + return false + } + return true + } + return false +} + +func (t *TargetInfo) SkipPlanForScaleIn(maxUnavailable int32) bool { + return maxUnavailable <= 0 && t.LeastUnavailable() <= 0 +} + +func (t *TargetInfo) SkipPlanForScaleOut(maxSurge int32) bool { + return maxSurge <= 0 && t.LeastSurge() <= 0 +} + +func unstructuredObjToTargetInfo(clusterName string, unstructuredObj *unstructured.Unstructured, desiredReplicas int32, + desiredRevision string, typeConfig *fedcorev1a1.FederatedTypeConfig, +) (*TargetInfo, error) { + if unstructuredObj == nil { + return &TargetInfo{ + ClusterName: clusterName, + DesiredReplicas: desiredReplicas, + }, nil + } + + replicas, err := utilunstructured.GetInt64FromPath(unstructuredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) + if err != nil || replicas == nil { + return nil, errors.Errorf("failed to retrieve replicas, err: %v", err) + } + maxSurge, maxUnavailable, err := RetrieveFencepost( + unstructuredObj, + MaxSurgePathSlice, + MaxUnavailablePathSlice, + int32(*replicas), + ) + if err != nil { + return nil, errors.Wrap(err, "failed to retrieve fencepost") + } + revision, ok := unstructuredObj.GetAnnotations()[common.CurrentRevisionAnnotation] + if !ok { + return nil, errors.Errorf("failed to retrieve annotation %s", common.CurrentRevisionAnnotation) + } + // consider it has been updated as long as the template is updated. 
We don't wait for the refresh of + // latestreplicaset annotations since the latency due to asynchronous updates may bring some problems + updated := revision == desiredRevision + currentNewReplicas, currentNewAvailableReplicas, err := retrieveNewReplicaSetInfo(unstructuredObj) + if err != nil { + return nil, errors.Wrap(err, "failed to retrieve new replicaSet info") + } + + updatedReplicas, updatedAvailableReplicas := currentNewReplicas, currentNewAvailableReplicas + if !updated { + updatedReplicas, updatedAvailableReplicas = 0, 0 + } + + actualReplicasOption, err := utilunstructured.GetInt64FromPath( + unstructuredObj, + typeConfig.Spec.PathDefinition.ReplicasSpec, + nil, + ) + if err != nil { + return nil, errors.Wrap(err, "failed to retrieve actual replicas") + } + var actualReplicas int32 + if actualReplicasOption != nil { + actualReplicas = int32(*actualReplicasOption) + } + + availableReplicasOption, err := utilunstructured.GetInt64FromPath( + unstructuredObj, + typeConfig.Spec.PathDefinition.AvailableReplicasStatus, + nil, + ) + if err != nil { + return nil, errors.Wrap(err, "failed to retrieve actual available replicas") + } + var availableReplicas int32 + if availableReplicasOption != nil { + availableReplicas = int32(*availableReplicasOption) + } + + t := &TargetInfo{ + ClusterName: clusterName, + Status: TargetStatus{ + Replicas: int32(*replicas), + ActualReplicas: actualReplicas, + AvailableReplicas: availableReplicas, + UpdatedReplicas: updatedReplicas, + UpdatedAvailableReplicas: updatedAvailableReplicas, + CurrentNewReplicas: currentNewReplicas, + CurrentNewAvailableReplicas: currentNewAvailableReplicas, + Updated: updated, + MaxSurge: maxSurge, + MaxUnavailable: maxUnavailable, + }, + DesiredReplicas: desiredReplicas, + } + return t, nil +} + +type RolloutPlanner struct { + typeConfig *fedcorev1a1.FederatedTypeConfig + Key string + Targets Targets + MaxSurge int32 + MaxUnavailable int32 + Replicas int32 + Revision string +} + +func NewRolloutPlanner( + key string, + typeConfig *fedcorev1a1.FederatedTypeConfig, + federatedResource *unstructured.Unstructured, + replicas int32, +) (*RolloutPlanner, error) { + pathPrefix := []string{common.SpecField, common.TemplateField} + maxSurgePath := append(pathPrefix, MaxSurgePathSlice...) + maxUnavailablePath := append(pathPrefix, MaxUnavailablePathSlice...) 
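+	// Both appends above reallocate, because pathPrefix (len == cap == 2) has
+	// no spare capacity, so maxSurgePath and maxUnavailablePath never share a
+	// backing array; appending twice from a slice with spare capacity would
+	// let the second append clobber the first result.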
+ maxSurge, maxUnavailable, err := RetrieveFencepost(federatedResource, maxSurgePath, maxUnavailablePath, replicas) + if err != nil { + return nil, errors.Wrap(err, "failed to retrieve maxSurge or maxUnavailable from federated resource") + } + desiredRevision, ok := federatedResource.GetAnnotations()[common.CurrentRevisionAnnotation] + if !ok { + return nil, errors.Errorf( + "failed to retrieve annotation %s from federated resource", + common.CurrentRevisionAnnotation, + ) + } + return &RolloutPlanner{ + typeConfig: typeConfig, + Key: key, + MaxSurge: maxSurge, + MaxUnavailable: maxUnavailable, + Replicas: replicas, + Revision: desiredRevision, + }, nil +} + +func (p *RolloutPlanner) RegisterTarget( + clusterName string, + targetObj *unstructured.Unstructured, + desiredReplicas int32, +) error { + t, err := unstructuredObjToTargetInfo(clusterName, targetObj, desiredReplicas, p.Revision, p.typeConfig) + if err != nil { + return err + } + p.Targets = append(p.Targets, t) + return nil +} + +func (p *RolloutPlanner) IsScalingEvent() bool { + _, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets) + // create / scale out / scale in + if len(targetsToScaleOut) != 0 && len(targetsToScaleIn) != 0 { + return false + } + if len(targetsToScaleOut) == 0 && len(targetsToScaleIn) == 0 { + return false + } + for _, t := range p.Targets { + if !t.UpdateCompleted() { + return false + } + if t.Flip(p.IsSurge()) { + return false + } + } + return true +} + +func (p *RolloutPlanner) PlanScale() RolloutPlans { + plans := make(map[string]*RolloutPlan) + for _, t := range p.Targets { + plans[t.ClusterName] = &RolloutPlan{} + } + return plans +} + +func (p *RolloutPlanner) String() string { + var ts []string + for _, t := range p.Targets { + ts = append(ts, fmt.Sprintf("%v", t)) + } + return fmt.Sprintf("%s[%d,%d,%d,%s]: %v", + p.Key, p.Replicas, p.MaxSurge, p.MaxUnavailable, p.Revision, strings.Join(ts, "; ")) +} + +func (p *RolloutPlanner) RemainingMaxSurge() int32 { + // maxSurge := p.Replicas + p.MaxSurge - p.Targets.ActualReplicas() + // maxSurge := p.MaxSurge - (p.Targets.ActualReplicas() - p.Replicas) + var replicas, occupied int32 + for _, t := range p.Targets { + replicas += t.Status.Replicas + occupied += t.LeastSurge() + } + return p.MaxSurge - (replicas - p.Replicas) - occupied +} + +func (p *RolloutPlanner) RemainingMaxUnavailable() int32 { + // maxUnavailable := p.Targets.AvailableReplicas() - (p.Replicas - p.MaxUnavailable) + // maxUnavailable := p.MaxUnavailable - (p.Replicas - p.Targets.AvailableReplicas()) + var replicas, occupied int32 + for _, t := range p.Targets { + replicas += t.Status.Replicas + occupied += t.LeastUnavailable() + } + return p.MaxUnavailable - (p.Replicas - replicas) - occupied +} + +func (p *RolloutPlanner) IsSurge() bool { + return p.MaxSurge != 0 && p.MaxUnavailable == 0 +} + +// ComputeRolloutPlans compute maxUnavailable, maxSurge, replicas during rollout process. It returns a map that +// contains all the targets which are planned according to current status. Nil in a plan means the corresponding field +// won't be overridden by the rollout plan and should be set with the original value. If there's no plan for a target, +// it means "don't rollout it, it should wait for it's turn". 
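+//
+// Illustrative use by a hypothetical caller (not code from this repository):
+//
+//	plans := planner.Plan()
+//	patches := plans.GetRolloutOverrides(clusterName) // empty if the cluster has no plan
+//	// nil Replicas/MaxSurge/MaxUnavailable fields produce no override patch,
+//	// so the original values on the resource stay in effect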
+func (p *RolloutPlanner) Plan() RolloutPlans { + targetsToUpdate, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets) + plans := make(map[string]*RolloutPlan) + + if p.IsScalingEvent() { + return p.PlanScale() + } + + // the remaining maxSurge & maxUnavailable that can be dispatched to deployments. If there are clusters that are + // not ready, or that we failed to get deployment from, the maxSurge/maxUnavailble will be increased/decreased + maxSurge, maxUnavailable := p.RemainingMaxSurge(), p.RemainingMaxUnavailable() + + // execution sequence (try to upgrade before scale out and scale in before upgrade): + // 1. upgrade targets waiting to be scaled out + // 2. scale in targets waiting to be scaled in + // 3. upgrade targets that only need to be upgraded + // 4. scale out targets waiting to be scaled out + // 5. upgrade targets waiting to be scaled in + for _, t := range targetsToScaleOut { + if t.SkipPlanForUpdate(maxSurge, maxUnavailable) { + continue + } + s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) + u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable()) + maxSurge -= sm + maxUnavailable -= um + r := t.Status.Replicas + plan := &RolloutPlan{Replicas: &r} + plan.MaxSurge = &s + plan.MaxUnavailable = &u + plan.correctFencepost(t, p.IsSurge()) + plans[t.ClusterName] = plan + } + + for _, t := range targetsToScaleIn { + if t.SkipPlanForScaleIn(maxUnavailable) { + continue + } + // we tend to scale in those that are already unavailable + leastUnavailable := t.LeastUnavailable() + if t.DuringUpdating() { + // if it' during updating (for example, the maxUnavailable is enough for scale in and updating coming next, + // so we set the replica and maxUnavailable; but a fed weight adjusting followed so we have to scale in again + // even though it's being updated), scaling will be performed proportionally and may not cover the + // unavailable instances as expected. 
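+			// Zeroing leastUnavailable keeps the plan strict: the scale-in is
+			// then charged in full against the shared maxUnavailable budget
+			// instead of being offset by replicas that are already unavailable.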
+ leastUnavailable = 0 + } + scale, more := t.MaxScaleIn(maxUnavailable, leastUnavailable) + maxUnavailable -= more + plan := &RolloutPlan{OnlyPatchReplicas: true} + r := t.Status.Replicas - scale + plan.Replicas = &r + plans[t.ClusterName] = plan + } + + for _, t := range targetsToUpdate { + if t.SkipPlanForUpdate(maxSurge, maxUnavailable) { + continue + } + s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) + u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable()) + maxSurge -= sm + maxUnavailable -= um + plan := &RolloutPlan{} + plan.MaxSurge = &s + plan.MaxUnavailable = &u + plan.correctFencepost(t, p.IsSurge()) + plans[t.ClusterName] = plan + } + + for _, t := range targetsToScaleOut { + if t.SkipPlanForScaleOut(maxSurge) { + continue + } + // make sure new rs exists to avoid too much unnecessary work + if !t.Status.Updated && t.Status.Replicas != 0 { + continue + } + leastSurge := t.LeastSurge() + if t.DuringUpdating() { + leastSurge = 0 + } + scale, more := t.MaxScaleOut(maxSurge, leastSurge) + maxSurge -= more + plan, ok := plans[t.ClusterName] + if !ok || plan == nil { + plan = &RolloutPlan{} + } + r := t.Status.Replicas + scale + plan.Replicas = &r + plans[t.ClusterName] = plan + } + + for _, t := range targetsToScaleIn { + plan, ok := plans[t.ClusterName] + if !ok || plan == nil { + r := t.Status.Replicas + plan = &RolloutPlan{Replicas: &r} + } + // we have already scale in some unavailable instances in the second step, exclude them + leastUnavailable := t.LeastUnavailable() + if !t.DuringUpdating() { + leastUnavailable -= t.Status.Replicas - *plan.Replicas + if leastUnavailable < 0 { + leastUnavailable = 0 + } + } + if t.SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable) { + continue + } + + plan.OnlyPatchReplicas = false + s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) + u, um := t.MaxUnavailable(maxUnavailable, leastUnavailable) + maxSurge -= sm + maxUnavailable -= um + plan.MaxSurge = &s + plan.MaxUnavailable = &u + plan.correctFencepost(t, p.IsSurge()) + plans[t.ClusterName] = plan + } + if err := validatePlans(p, plans); err != nil { + klog.Errorf("Failed to generate rollout plan for %s: %v. 
Current status: %s", p.Key, err, p) + return RolloutPlans{} + } + return plans +} + +func sortTargets(targets []*TargetInfo) ([]*TargetInfo, []*TargetInfo, []*TargetInfo) { + // sort the list to first update the targets that are already in update process + sort.Slice(targets, func(i, j int) bool { + return targets[i].ClusterName < targets[j].ClusterName + }) + var toUpdate, toScaleOut, toScaleIn []*TargetInfo + for _, t := range targets { + change := t.DesiredReplicas - t.Status.Replicas + switch { + case change < 0: + toScaleIn = append(toScaleIn, t) + case change > 0: + toScaleOut = append(toScaleOut, t) + default: + toUpdate = append(toUpdate, t) + } + } + return toUpdate, toScaleOut, toScaleIn +} + +func Int32Min(a, b int32) int32 { + if b < a { + return b + } + return a +} + +func Int32Max(a, b int32) int32 { + if b > a { + return b + } + return a +} + +func resolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent( + intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), + int(desired), + true, + ) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent( + intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), + int(desired), + false, + ) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. + unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} + +func RetrieveFencepost(unstructuredObj *unstructured.Unstructured, maxSurgePath []string, maxUnavailablePath []string, + replicas int32, +) (int32, int32, error) { + var maxSurge, maxUnavailable *intstrutil.IntOrString + if ms, ok, err := unstructured.NestedString(unstructuredObj.Object, maxSurgePath...); ok && err == nil { + maxSurge = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: ms} + } else { + if ms, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxSurgePath...); ok && err2 == nil { + maxSurge = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(ms)} + } else { + klog.V(4).Infof("Failed to retrieve maxSurge from %s/%s: %v, %v", + unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2) + } + } + if mu, ok, err := unstructured.NestedString(unstructuredObj.Object, maxUnavailablePath...); ok && err == nil { + maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: mu} + } else { + if mu, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxUnavailablePath...); ok && err2 == nil { + maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(mu)} + } else { + klog.V(4).Infof("Failed to retrieve maxUnavailable from %s/%s: %v, %v", + unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2) + } + } + + ms, mu, err := resolveFenceposts(maxSurge, maxUnavailable, replicas) + if err != nil { + return 0, 0, err + } + if ms < 0 { + ms = 0 + } + if mu < 0 { + mu = 0 + } + return ms, mu, nil +} + +func retrieveNewReplicaSetInfo(unstructuredObj *unstructured.Unstructured) (int32, int32, error) { + ann, ok := unstructuredObj.GetAnnotations()[LatestReplicasetReplicasAnnotation] + if !ok || ann == "" { + return 0, 0, 
errors.Errorf("missing annotation %s", LatestReplicasetReplicasAnnotation) + } + replicas, err := strconv.ParseInt(ann, 10, 32) + if err != nil { + return 0, 0, err + } + ann, ok = unstructuredObj.GetAnnotations()[LatestReplicasetAvailableReplicasAnnotation] + if !ok || ann == "" { + return 0, 0, errors.Errorf( + "missing annotation %s", LatestReplicasetAvailableReplicasAnnotation) + } + availableReplicas, err := strconv.ParseInt(ann, 10, 32) + if err != nil { + return 0, 0, err + } + // todo: make sure the latestreplicaset annotations describe the current pod template of deployment + // a simple way to tell if the latestreplicaset annotations is up to date with current deployment. + lastRsName, lastRsNameExists := unstructuredObj.GetAnnotations()[common.LastReplicasetName] + rsName, rsNameExists := unstructuredObj.GetAnnotations()[LatestReplicasetNameAnnotation] + if !rsNameExists { + return 0, 0, errors.Errorf("missing annotation %s", LatestReplicasetNameAnnotation) + } + rsNameOutdated := lastRsNameExists && rsNameExists && lastRsName == rsName + if rsNameOutdated { + // paused=true may also result in this situation + replicas, availableReplicas = 0, 0 + } + return int32(replicas), int32(availableReplicas), nil +} + +func validatePlans(p *RolloutPlanner, plans RolloutPlans) error { + var planned, desired, current, maxUnavailable int32 + for _, t := range p.Targets { + desired += t.DesiredReplicas + cluster := t.ClusterName + r := t.Status.Replicas + current += r + if p, ok := plans[cluster]; ok { + if p == nil { + return errors.Errorf("invalid plan for %s: %v", cluster, p) + } + if p.Replicas != nil { + r = *p.Replicas + } else { + r = t.DesiredReplicas + } + if p.MaxUnavailable != nil { + if p.MaxSurge == nil || *p.MaxSurge != 0 || *p.MaxUnavailable != 1 { + maxUnavailable += *p.MaxUnavailable + } + } + } + planned += r + } + if p.Replicas-desired > p.MaxUnavailable { + return errors.Errorf("desired replicas deviates too much from the initial replicas, maybe some " + + "clusters are not ready") + } + l, h := desired, current + if desired > current { + l, h = current, desired + } + if l-planned > p.MaxUnavailable || planned-h > p.MaxSurge { + return errors.Errorf("invalid plan: %v", plans) + } + return nil +} From 0d2fb005c625d8aa92af26ff406c61c2433e8c94 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 19:07:21 +0800 Subject: [PATCH 029/173] uncomment files --- pkg/controllers/util/rolloutplan.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pkg/controllers/util/rolloutplan.go b/pkg/controllers/util/rolloutplan.go index 9aababa1..7f777770 100644 --- a/pkg/controllers/util/rolloutplan.go +++ b/pkg/controllers/util/rolloutplan.go @@ -23,12 +23,12 @@ import ( "strings" "github.com/pkg/errors" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" ) @@ -56,9 +56,9 @@ var ( ) type RolloutPlan struct { - Replicas *int64 - MaxSurge *int64 - MaxUnavailable *int64 + Replicas *int32 + MaxSurge *int32 + MaxUnavailable *int32 OnlyPatchReplicas bool } @@ -76,16 +76,16 @@ func (p RolloutPlan) String() string { return 
fmt.Sprintf("%s,%s,%s,%t", r, s, u, p.OnlyPatchReplicas) } -func (p RolloutPlan) toOverrides() fedcorev1a1.OverridePatches { - overrides := fedcorev1a1.OverridePatches{} +func (p RolloutPlan) toOverrides() fedtypesv1a1.OverridePatches { + overrides := fedtypesv1a1.OverridePatches{} if p.Replicas != nil { - overrides = append(overrides, fedcorev1a1.OverridePatch{Path: ReplicaPath, Value: v1.JSON{Raw: []byte(*p.Replicas)}}) + overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: ReplicaPath, Value: *p.Replicas}) } if p.MaxSurge != nil { - overrides = append(overrides, fedcorev1a1.OverridePatch{Path: MaxSurgePath, Value: *p.MaxSurge}) + overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: MaxSurgePath, Value: *p.MaxSurge}) } if p.MaxUnavailable != nil { - overrides = append(overrides, fedcorev1a1.OverridePatch{Path: MaxUnavailablePath, Value: *p.MaxUnavailable}) + overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: MaxUnavailablePath, Value: *p.MaxUnavailable}) } return overrides } @@ -121,10 +121,10 @@ func (r RolloutPlans) String() string { return strings.Join(strs, "; ") } -func (r RolloutPlans) GetRolloutOverrides(clusterName string) fedcorev1a1.OverridePatches { +func (r RolloutPlans) GetRolloutOverrides(clusterName string) fedtypesv1a1.OverridePatches { p, ok := r[clusterName] if !ok { - return fedcorev1a1.OverridePatches{} + return fedtypesv1a1.OverridePatches{} } return p.toOverrides() } From 89e78e499a047b164ef58bae47ce54682b8c7b78 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 12 Jul 2023 23:57:56 +0800 Subject: [PATCH 030/173] add utils to inject klog values in context --- .../federatedinformermanager.go | 7 ++-- pkg/util/informermanager/informermanager.go | 16 ++++----- pkg/util/logging/logging.go | 36 +++++++++++++++++++ 3 files changed, 45 insertions(+), 14 deletions(-) create mode 100644 pkg/util/logging/logging.go diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index d199725c..b3942e9c 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -35,6 +35,7 @@ import ( fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" ) type federatedInformerManager struct { @@ -120,8 +121,7 @@ func (m *federatedInformerManager) worker(ctx context.Context) { return } - logger = logger.WithValues("cluster", name) - ctx = klog.NewContext(ctx, logger) + ctx, logger = logging.InjectLoggerValues(ctx, "cluster", name) cluster, err := m.clusterInformer.Lister().Get(name) if err != nil && !apierrors.IsNotFound(err) { @@ -289,8 +289,7 @@ func (m *federatedInformerManager) Start(ctx context.Context) { m.lock.Lock() defer m.lock.Unlock() - logger := klog.LoggerWithName(klog.FromContext(ctx), "federated-informer-manager") - ctx = klog.NewContext(ctx, logger) + ctx, logger := logging.InjectLoggerName(ctx, "federated-informer-manager") if m.started { logger.Error(nil, "FederatedInformerManager cannot be started more than once") diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 9ab6c82b..0345cb65 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -36,6 +36,7 @@ import 
( fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/util/bijection" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" ) type informerManager struct { @@ -107,8 +108,7 @@ func (m *informerManager) worker(ctx context.Context) { return } - logger = logger.WithValues("ftc", name) - ctx = klog.NewContext(ctx, logger) + ctx, logger = logging.InjectLoggerValues(ctx, "ftc", name) ftc, err := m.ftcInformer.Lister().Get(name) if apierrors.IsNotFound(err) { @@ -150,14 +150,12 @@ func (m *informerManager) processFTC(ctx context.Context, ftc *fedcorev1a1.Feder ftcName := ftc.Name gvr := ftc.GetSourceTypeGVR() - logger := klog.FromContext(ctx).WithValues("gvr", gvr.String()) - ctx = klog.NewContext(ctx, logger) + ctx, logger := logging.InjectLoggerValues(ctx, "gvr", gvr.String()) var informer informers.GenericInformer if oldGVR, exists := m.gvrMapping.LookupByT1(ftcName); exists { - logger = klog.FromContext(ctx).WithValues("old-gvr", oldGVR.String()) - ctx = klog.NewContext(ctx, logger) + ctx, _ := logging.InjectLoggerValues(ctx, "old-gvr", oldGVR.String()) if oldGVR != gvr { // This might occur if a ftc was deleted and recreated with a different source type within a short period of @@ -232,8 +230,7 @@ func (m *informerManager) processFTCDeletion(ctx context.Context, ftcName string defer m.lock.Unlock() if gvr, exists := m.gvrMapping.LookupByT1(ftcName); exists { - logger := klog.FromContext(ctx).WithValues("gvr", gvr.String()) - ctx = klog.NewContext(ctx, logger) + ctx, _ = logging.InjectLoggerValues(ctx, "gvr", gvr.String()) } return m.processFTCDeletionUnlocked(ctx, ftcName) @@ -297,8 +294,7 @@ func (m *informerManager) Start(ctx context.Context) { m.lock.Lock() defer m.lock.Unlock() - logger := klog.LoggerWithName(klog.FromContext(ctx), "informer-manager") - ctx = klog.NewContext(ctx, logger) + ctx, logger := logging.InjectLoggerName(ctx, "informer-manager") if m.started { logger.Error(nil, "InformerManager cannot be started more than once") diff --git a/pkg/util/logging/logging.go b/pkg/util/logging/logging.go new file mode 100644 index 00000000..cf2d33d4 --- /dev/null +++ b/pkg/util/logging/logging.go @@ -0,0 +1,36 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logging + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/klog/v2" +) + +func InjectLoggerValues(ctx context.Context, values ...interface{}) (context.Context, logr.Logger) { + logger := klog.FromContext(ctx).WithValues(values...) 
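+	// klog.FromContext falls back to the global klog logger when ctx carries
+	// none; WithValues returns a copy with the extra key/value pairs attached,
+	// and NewContext below stores that copy in a child context so downstream
+	// callees inherit the values automatically.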
+ ctx = klog.NewContext(ctx, logger) + return ctx, logger +} + +func InjectLoggerName(ctx context.Context, name string) (context.Context, logr.Logger) { + logger := klog.LoggerWithName(klog.FromContext(ctx), name) + ctx = klog.NewContext(ctx, logger) + return ctx, logger +} From a457bd4ad712b40f9fbe9e020dbf868c468d2ac7 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 13 Jul 2023 00:00:09 +0800 Subject: [PATCH 031/173] reset rate limiter on no reenqueue --- pkg/util/informermanager/federatedinformermanager.go | 1 + pkg/util/informermanager/informermanager.go | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index b3942e9c..d4e342f5 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -146,6 +146,7 @@ func (m *federatedInformerManager) worker(ctx context.Context) { m.queue.AddRateLimited(key) } else { logger.Error(err, "Failed to process FederatedCluster") + m.queue.Forget(key) } return } diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 0345cb65..64323e48 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -133,6 +133,7 @@ func (m *informerManager) worker(ctx context.Context) { m.queue.AddRateLimited(key) } else { logger.Error(err, "Failed to process FederatedTypeConfig") + m.queue.Forget(key) } return } From 5b4fed026561663d2c30df5f6bfece4215efcf54 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 13 Jul 2023 14:56:28 +0800 Subject: [PATCH 032/173] make counters atomic --- .../federatedinformermanager_test.go | 27 +++++++++++-------- .../informermanager/informermanager_test.go | 20 ++++++++------ 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index aaf0b9cc..45c40ed6 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -1318,14 +1318,18 @@ func TestFederatedInformerManager(t *testing.T) { // 1. Bootstrap environment - var generation int64 = 1 - var expectedCallbackCount int64 = 1 + generation := &atomic.Int64{} + generation.Store(1) + + expectedCallbackCount := &atomic.Int64{} + expectedCallbackCount.Store(1) callBackCount := &atomic.Int64{} - // assertionCh is used to achieve 2 things: + // assertionCh is used to achieve 3 things: // 1. It is used to pass assertions to the main goroutine. // 2. It is used as an implicit lock to ensure FTC events are not squashed by the InformerManager. + // 3. It is used to ensure that the last event has been processed before the main goroutine sends an update. 
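// For illustration only (not part of the patch series): the pkg/util/logging
// helpers introduced in patch 030 collapse the recurring
// FromContext/WithValues/NewContext sequence into a single call. A minimal
// sketch of a worker using them:
//
//	ctx, logger := logging.InjectLoggerName(ctx, "cluster-worker")
//	ctx, logger = logging.InjectLoggerValues(ctx, "cluster", name)
//	logger.V(4).Info("Processing cluster")
//	// callees recover the same logger via klog.FromContext(ctx)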
assertionCh := make(chan func()) cluster := getTestCluster("cluster-1") @@ -1334,15 +1338,16 @@ func TestFederatedInformerManager(t *testing.T) { clusterHandler := &ClusterEventHandler{ Predicate: func(oldCluster *fedcorev1a1.FederatedCluster, newCluster *fedcorev1a1.FederatedCluster) bool { - if generation == 1 { + curGeneration := generation.Load() + if curGeneration == 1 { assertionCh <- func() { g.Expect(oldCluster).To(gomega.BeNil()) g.Expect(newCluster.GetGeneration()).To(gomega.BeNumerically("==", 1)) } } else { assertionCh <- func() { - g.Expect(oldCluster.GetGeneration()).To(gomega.BeNumerically("==", generation-1)) - g.Expect(newCluster.GetGeneration()).To(gomega.BeNumerically("==", generation)) + g.Expect(oldCluster.GetGeneration()).To(gomega.BeNumerically("==", curGeneration-1)) + g.Expect(newCluster.GetGeneration()).To(gomega.BeNumerically("==", curGeneration)) } } @@ -1381,19 +1386,19 @@ func TestFederatedInformerManager(t *testing.T) { fn := <-assertionCh fn() - g.Expect(callBackCount.Load()).To(gomega.Equal(expectedCallbackCount)) + g.Expect(callBackCount.Load()).To(gomega.Equal(expectedCallbackCount.Load())) // 3. Generate cluster update events for i := 0; i < 5; i++ { - generation++ - cluster.SetGeneration(generation) + generation.Add(1) + cluster.SetGeneration(generation.Load()) if i%2 == 0 { cluster.SetAnnotations(map[string]string{"predicate": predicateFalse}) } else { cluster.SetAnnotations(map[string]string{"predicate": predicateTrue}) - expectedCallbackCount++ + expectedCallbackCount.Add(1) } var err error @@ -1402,7 +1407,7 @@ func TestFederatedInformerManager(t *testing.T) { fn = <-assertionCh fn() - g.Expect(callBackCount.Load()).To(gomega.Equal(expectedCallbackCount)) + g.Expect(callBackCount.Load()).To(gomega.Equal(expectedCallbackCount.Load())) } }) } diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 0441860b..e4cf4ae0 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -18,6 +18,7 @@ package informermanager import ( "context" + "sync/atomic" "testing" "time" @@ -300,27 +301,30 @@ func TestInformerManager(t *testing.T) { // 1. Bootstrap environment - var generation int64 = 1 + generation := &atomic.Int64{} + generation.Store(1) - // assertionCh is used to achieve 2 things: + // assertionCh is used to achieve 3 things: // 1. It is used to pass assertions to the main goroutine. // 2. It is used as an implicit lock to ensure FTC events are not squashed by the InformerManager. + // 3. It is used to ensure that the last event has been processed before the main goroutine sends an update. 
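// For illustration only (not part of the patch series): the counters above
// were made atomic because the Predicate and assertion callbacks run on
// informer goroutines while the main test goroutine advances the generation;
// with plain int64 variables `go test -race` would flag these accesses. The
// pattern in miniature:
//
//	generation := &atomic.Int64{}
//	generation.Store(1)
//	go func() { _ = generation.Load() }() // concurrent reader stays race-free
//	generation.Add(1)                     // concurrent writer stays race-free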
assertionCh := make(chan func()) ftc := deploymentFTC.DeepCopy() - ftc.SetGeneration(generation) + ftc.SetGeneration(generation.Load()) generator := &EventHandlerGenerator{ Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { - if generation == 1 { + curGeneration := generation.Load() + if curGeneration == 1 { assertionCh <- func() { g.Expect(lastApplied).To(gomega.BeNil()) g.Expect(latest.GetGeneration()).To(gomega.BeNumerically("==", 1)) } } else { assertionCh <- func() { - g.Expect(lastApplied.GetGeneration()).To(gomega.BeNumerically("==", generation-1)) - g.Expect(latest.GetGeneration()).To(gomega.BeNumerically("==", generation)) + g.Expect(lastApplied.GetGeneration()).To(gomega.BeNumerically("==", curGeneration-1)) + g.Expect(latest.GetGeneration()).To(gomega.BeNumerically("==", curGeneration)) } } @@ -346,8 +350,8 @@ func TestInformerManager(t *testing.T) { // 3. Generate FTC update events for i := 0; i < 5; i++ { - generation++ - ftc.SetGeneration(generation) + generation.Add(1) + ftc.SetGeneration(generation.Load()) var err error ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) From 4cfcbf876dd377c838f8d3cb28f43c79cf4d35c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 13 Jul 2023 08:14:10 +0000 Subject: [PATCH 033/173] refactor(reconcileworker): accept ctx in reconcile --- pkg/controllers/util/worker/worker.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/pkg/controllers/util/worker/worker.go b/pkg/controllers/util/worker/worker.go index 3b104366..2cb24766 100644 --- a/pkg/controllers/util/worker/worker.go +++ b/pkg/controllers/util/worker/worker.go @@ -21,6 +21,7 @@ are Copyright 2023 The KubeAdmiral Authors. 
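// For illustration only (not part of the patch series): with the signature
// change below, a reconciler receives the context passed to Run, so it can
// honor cancellation and reuse the context-scoped logger. A hedged sketch,
// assuming the zero worker.Result means "done, do not requeue":
//
//	func reconcile(ctx context.Context, key string) worker.Result {
//		if ctx.Err() != nil {
//			return worker.Result{} // the worker is shutting down
//		}
//		klog.FromContext(ctx).V(4).Info("Reconciling", "key", key)
//		return worker.Result{}
//	}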
package worker import ( + "context" "math" "time" @@ -31,7 +32,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/stats" ) -type ReconcileFunc[Key any] func(Key) Result +type ReconcileFunc[Key any] func(context.Context, Key) Result type KeyFunc[Key any] func(metav1.Object) Key @@ -40,7 +41,7 @@ type ReconcileWorker[Key any] interface { EnqueueObject(obj metav1.Object) EnqueueWithBackoff(key Key) EnqueueWithDelay(key Key, delay time.Duration) - Run(stopChan <-chan struct{}) + Run(ctx context.Context) } type RateLimiterOptions struct { @@ -132,26 +133,26 @@ func (w *asyncWorker[Key]) EnqueueWithDelay(key Key, delay time.Duration) { w.queue.AddAfter(key, delay) } -func (w *asyncWorker[Key]) Run(stopChan <-chan struct{}) { +func (w *asyncWorker[Key]) Run(ctx context.Context) { for i := 0; i < w.workerCount; i++ { - go w.worker() + go w.worker(ctx) } // Ensure all goroutines are cleaned up when the stop channel closes go func() { - <-stopChan + <-ctx.Done() w.queue.ShutDown() }() } -func (w *asyncWorker[Key]) processNextItem() bool { +func (w *asyncWorker[Key]) processNextItem(ctx context.Context) bool { keyAny, quit := w.queue.Get() if quit { return false } key := keyAny.(Key) - result := w.reconcile(key) + result := w.reconcile(ctx, key) w.queue.Done(keyAny) if result.Backoff { @@ -167,7 +168,7 @@ func (w *asyncWorker[Key]) processNextItem() bool { return true } -func (w *asyncWorker[Key]) worker() { - for w.processNextItem() { +func (w *asyncWorker[Key]) worker(ctx context.Context) { + for w.processNextItem(ctx) { } } From 8c4417d17a2e8d315ba238302c2b804328ffb348 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 13 Jul 2023 19:54:55 +0800 Subject: [PATCH 034/173] refactor(informer-manager): use gvk as keys and support TweakListOptions --- .../extensions_federatedtypeconfig.go | 9 +++ pkg/util/cluster/util.go | 46 ++++++++++++ pkg/util/informermanager/common.go | 25 +++++++ .../federatedinformermanager.go | 26 +++++-- .../federatedinformermanager_test.go | 24 +++---- pkg/util/informermanager/informermanager.go | 70 +++++++++++++------ .../informermanager/informermanager_test.go | 14 ++-- pkg/util/informermanager/interface.go | 6 +- pkg/util/informermanager/testutils_test.go | 13 ++++ .../util/managedlabel/managedlabel.go | 0 10 files changed, 186 insertions(+), 47 deletions(-) create mode 100644 pkg/util/cluster/util.go create mode 100644 pkg/util/informermanager/common.go rename pkg/{controllers => }/util/managedlabel/managedlabel.go (100%) diff --git a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go index f1634890..cdeb2d3d 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go @@ -55,6 +55,15 @@ func (f *FederatedTypeConfig) GetSourceTypeGVR() schema.GroupVersionResource { } } +func (f *FederatedTypeConfig) GetSourceTypeGVK() schema.GroupVersionKind { + apiResource := f.GetSourceType() + return schema.GroupVersionKind{ + Group: apiResource.Group, + Version: apiResource.Version, + Kind: apiResource.Kind, + } +} + func (f *FederatedTypeConfig) GetStatusCollectionEnabled() bool { return f.Spec.StatusCollection != nil } diff --git a/pkg/util/cluster/util.go b/pkg/util/cluster/util.go new file mode 100644 index 00000000..00734405 --- /dev/null +++ b/pkg/util/cluster/util.go @@ -0,0 +1,46 @@ +/* +Copyright 2023 The KubeAdmiral Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cluster
+
+import (
+	corev1 "k8s.io/api/core/v1"
+
+	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
+)
+
+
+func IsClusterReady(clusterStatus *fedcorev1a1.FederatedClusterStatus) bool {
+	for _, condition := range clusterStatus.Conditions {
+		if condition.Type == fedcorev1a1.ClusterReady {
+			if condition.Status == corev1.ConditionTrue {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func IsClusterJoined(clusterStatus *fedcorev1a1.FederatedClusterStatus) bool {
+	for _, condition := range clusterStatus.Conditions {
+		if condition.Type == fedcorev1a1.ClusterJoined {
+			if condition.Status == corev1.ConditionTrue {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/pkg/util/informermanager/common.go b/pkg/util/informermanager/common.go
new file mode 100644
index 00000000..84195264
--- /dev/null
+++ b/pkg/util/informermanager/common.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2023 The KubeAdmiral Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package informermanager
+
+import fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
+
+// RegisterOncePredicate can be used as an EventHandlerGenerator predicate
+// to generate and register event handlers exactly once for each FTC.
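// For illustration only (not part of the patch series): RegisterOncePredicate
// below plugs into an EventHandlerGenerator so that a handler is generated a
// single time per FTC and left untouched on later FTC updates. A minimal
// sketch, eliding the field that produces the actual cache.ResourceEventHandler:
//
//	generator := &informermanager.EventHandlerGenerator{
//		Predicate: informermanager.RegisterOncePredicate,
//		// ... plus the function returning the cache.ResourceEventHandler ...
//	}
//	_ = manager.AddEventHandlerGenerator(generator)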
+func RegisterOncePredicate(old, _ *fedcorev1a1.FederatedTypeConfig) bool { + return old == nil +} diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index d4e342f5..1fc7e442 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -24,6 +24,7 @@ import ( "sync" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" @@ -34,8 +35,9 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" ) type federatedInformerManager struct { @@ -83,7 +85,7 @@ func NewFederatedInformerManager( clusterInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { cluster := obj.(*fedcorev1a1.FederatedCluster) - return util.IsClusterJoined(&cluster.Status) + return clusterutil.IsClusterJoined(&cluster.Status) }, Handler: cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { manager.enqueue(obj) }, @@ -129,7 +131,7 @@ func (m *federatedInformerManager) worker(ctx context.Context) { m.queue.AddRateLimited(key) return } - if apierrors.IsNotFound(err) || !util.IsClusterJoined(&cluster.Status) { + if apierrors.IsNotFound(err) || !clusterutil.IsClusterJoined(&cluster.Status) { if err := m.processClusterDeletion(ctx, name); err != nil { logger.Error(err, "Failed to process FederatedCluster, will retry") m.queue.AddRateLimited(key) @@ -185,7 +187,19 @@ func (m *federatedInformerManager) processCluster( return fmt.Errorf("failed to get client for cluster %s: %w", clusterName, err), true } - manager := NewInformerManager(clusterClient, m.ftcInformer) + manager := NewInformerManager( + clusterClient, + m.ftcInformer, + func(opts *metav1.ListOptions) { + selector := &metav1.LabelSelector{} + metav1.AddLabelToSelector( + selector, + managedlabel.ManagedByKubeAdmiralLabelKey, + managedlabel.ManagedByKubeAdmiralLabelValue, + ) + opts.LabelSelector = metav1.FormatLabelSelector(selector) + }, + ) ctx, cancel := context.WithCancel(ctx) for _, generator := range m.eventHandlerGenerators { @@ -268,7 +282,7 @@ func (m *federatedInformerManager) GetFederatedTypeConfigLister() fedcorev1a1lis } func (m *federatedInformerManager) GetResourceLister( - gvr schema.GroupVersionResource, + gvk schema.GroupVersionKind, cluster string, ) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) { m.lock.RLock() @@ -279,7 +293,7 @@ func (m *federatedInformerManager) GetResourceLister( return nil, nil, false } - return manager.GetResourceLister(gvr) + return manager.GetResourceLister(gvk) } func (m *federatedInformerManager) HasSynced() bool { diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index 45c40ed6..0d651a62 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ 
-182,11 +182,11 @@ func TestFederatedInformerManager(t *testing.T) { // 2. Verify that listers for existing FTCs and clusters are eventually available for _, ftc := range defaultFTCs { - gvr := ftc.GetSourceTypeGVR() + gvk := ftc.GetSourceTypeGVK() for _, cluster := range defaultClusters { g.Eventually(func(g gomega.Gomega) { - lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name) + lister, informerSynced, exists := manager.GetResourceLister(gvk, cluster.Name) g.Expect(exists).To(gomega.BeTrue()) g.Expect(lister).ToNot(gomega.BeNil()) @@ -197,17 +197,17 @@ func TestFederatedInformerManager(t *testing.T) { // 3. Verify that the lister for non-existent FTCs or clusters are not available - lister, informerSynced, exists := manager.GetResourceLister(common.DaemonSetGVR, "cluster-1") + lister, informerSynced, exists := manager.GetResourceLister(daemonsetGVK, "cluster-1") g.Expect(exists).To(gomega.BeFalse()) g.Expect(lister).To(gomega.BeNil()) g.Expect(informerSynced).To(gomega.BeNil()) - lister, informerSynced, exists = manager.GetResourceLister(common.DeploymentGVR, "cluster-4") + lister, informerSynced, exists = manager.GetResourceLister(deploymentGVK, "cluster-4") g.Expect(exists).To(gomega.BeFalse()) g.Expect(lister).To(gomega.BeNil()) g.Expect(informerSynced).To(gomega.BeNil()) - lister, informerSynced, exists = manager.GetResourceLister(common.DaemonSetGVR, "cluster-4") + lister, informerSynced, exists = manager.GetResourceLister(daemonsetGVK, "cluster-4") g.Expect(exists).To(gomega.BeFalse()) g.Expect(lister).To(gomega.BeNil()) g.Expect(informerSynced).To(gomega.BeNil()) @@ -245,13 +245,13 @@ func TestFederatedInformerManager(t *testing.T) { }() ftc := daemonsetFTC - gvr := ftc.GetSourceTypeGVR() + gvk := ftc.GetSourceTypeGVK() // 2. 
Verify that listers for daemonsets FTCs is not available at the start g.Consistently(func(g gomega.Gomega) { for _, cluster := range defaultClusters { - lister, informerSynced, exists := manager.GetResourceLister(common.DeploymentGVR, cluster.Name) + lister, informerSynced, exists := manager.GetResourceLister(deploymentGVK, cluster.Name) g.Expect(exists).To(gomega.BeFalse()) g.Expect(lister).To(gomega.BeNil()) g.Expect(informerSynced).To(gomega.BeNil()) @@ -267,7 +267,7 @@ func TestFederatedInformerManager(t *testing.T) { g.Eventually(func(g gomega.Gomega) { for _, cluster := range defaultClusters { - lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name) + lister, informerSynced, exists := manager.GetResourceLister(gvk, cluster.Name) g.Expect(exists).To(gomega.BeTrue()) g.Expect(lister).ToNot(gomega.BeNil()) g.Expect(informerSynced()).To(gomega.BeTrue()) @@ -308,9 +308,9 @@ func TestFederatedInformerManager(t *testing.T) { g.Consistently(func(g gomega.Gomega) { for _, ftc := range defaultFTCs { - gvr := ftc.GetSourceTypeGVR() + gvk := ftc.GetSourceTypeGVK() - lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name) + lister, informerSynced, exists := manager.GetResourceLister(gvk, cluster.Name) g.Expect(exists).To(gomega.BeFalse()) g.Expect(lister).To(gomega.BeNil()) g.Expect(informerSynced).To(gomega.BeNil()) @@ -326,9 +326,9 @@ func TestFederatedInformerManager(t *testing.T) { g.Eventually(func(g gomega.Gomega) { for _, ftc := range defaultFTCs { - gvr := ftc.GetSourceTypeGVR() + gvk := ftc.GetSourceTypeGVK() - lister, informerSynced, exists := manager.GetResourceLister(gvr, cluster.Name) + lister, informerSynced, exists := manager.GetResourceLister(gvk, cluster.Name) g.Expect(exists).To(gomega.BeTrue()) g.Expect(lister).ToNot(gomega.BeNil()) g.Expect(informerSynced()).To(gomega.BeTrue()) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 64323e48..b7ca7bb5 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -45,13 +45,15 @@ type informerManager struct { started bool shutdown bool - client dynamic.Interface - ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer + client dynamic.Interface + informerTweakListOptions dynamicinformer.TweakListOptionsFunc + ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer eventHandlerGenerators []*EventHandlerGenerator - gvrMapping *bijection.Bijection[string, schema.GroupVersionResource] + gvkMapping *bijection.Bijection[string, schema.GroupVersionKind] + lastObservedFTCs map[string]*fedcorev1a1.FederatedTypeConfig informers map[string]informers.GenericInformer informerCancelFuncs map[string]context.CancelFunc eventHandlerRegistrations map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration @@ -60,14 +62,20 @@ type informerManager struct { queue workqueue.RateLimitingInterface } -func NewInformerManager(client dynamic.Interface, ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer) InformerManager { +func NewInformerManager( + client dynamic.Interface, + ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer, + informerTweakListOptions dynamicinformer.TweakListOptionsFunc, +) InformerManager { manager := &informerManager{ lock: sync.RWMutex{}, started: false, client: client, + informerTweakListOptions: informerTweakListOptions, ftcInformer: ftcInformer, eventHandlerGenerators: []*EventHandlerGenerator{}, - gvrMapping: 
bijection.NewBijection[string, schema.GroupVersionResource](), + gvkMapping: bijection.NewBijection[string, schema.GroupVersionKind](), + lastObservedFTCs: map[string]*fedcorev1a1.FederatedTypeConfig{}, informers: map[string]informers.GenericInformer{}, informerCancelFuncs: map[string]context.CancelFunc{}, eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, @@ -144,21 +152,26 @@ func (m *informerManager) worker(ctx context.Context) { } } -func (m *informerManager) processFTC(ctx context.Context, ftc *fedcorev1a1.FederatedTypeConfig) (err error, needReenqueue bool) { +func (m *informerManager) processFTC( + ctx context.Context, + ftc *fedcorev1a1.FederatedTypeConfig, +) (err error, needReenqueue bool) { m.lock.Lock() defer m.lock.Unlock() + ftc = ftc.DeepCopy() ftcName := ftc.Name + gvk := ftc.GetSourceTypeGVK() gvr := ftc.GetSourceTypeGVR() - ctx, logger := logging.InjectLoggerValues(ctx, "gvr", gvr.String()) + ctx, logger := logging.InjectLoggerValues(ctx, "gvk", gvk.String()) var informer informers.GenericInformer - if oldGVR, exists := m.gvrMapping.LookupByT1(ftcName); exists { - ctx, _ := logging.InjectLoggerValues(ctx, "old-gvr", oldGVR.String()) + if oldGVK, exists := m.gvkMapping.LookupByT1(ftcName); exists { + ctx, _ := logging.InjectLoggerValues(ctx, "old-gvk", oldGVK.String()) - if oldGVR != gvr { + if oldGVK != gvk { // This might occur if a ftc was deleted and recreated with a different source type within a short period of // time and we missed processing the deletion. We simply process the ftc deletion and reenqueue. Note: // updating of ftc source types, however, is still not a supported use case. @@ -168,8 +181,8 @@ func (m *informerManager) processFTC(ctx context.Context, ftc *fedcorev1a1.Feder informer = m.informers[ftcName] } else { - if err := m.gvrMapping.Add(ftcName, gvr); err != nil { - // There must be another ftc with the same source type GVR. + if err := m.gvkMapping.Add(ftcName, gvk); err != nil { + // There must be another ftc with the same source type GVK. 
return fmt.Errorf("source type is already referenced by another FederatedTypeConfig: %w", err), false } @@ -181,11 +194,12 @@ func (m *informerManager) processFTC(ctx context.Context, ftc *fedcorev1a1.Feder metav1.NamespaceAll, 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - nil, + m.informerTweakListOptions, ) ctx, cancel := context.WithCancel(ctx) go informer.Informer().Run(ctx.Done()) + m.lastObservedFTCs[ftcName] = ftc m.informers[ftcName] = informer m.informerCancelFuncs[ftcName] = cancel m.eventHandlerRegistrations[ftcName] = map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{} @@ -195,8 +209,6 @@ func (m *informerManager) processFTC(ctx context.Context, ftc *fedcorev1a1.Feder registrations := m.eventHandlerRegistrations[ftcName] lastAppliedFTCs := m.lastAppliedFTCsCache[ftcName] - ftc = ftc.DeepCopy() - for _, generator := range m.eventHandlerGenerators { lastApplied := lastAppliedFTCs[generator] if !generator.Predicate(lastApplied, ftc) { @@ -230,8 +242,8 @@ func (m *informerManager) processFTCDeletion(ctx context.Context, ftcName string m.lock.Lock() defer m.lock.Unlock() - if gvr, exists := m.gvrMapping.LookupByT1(ftcName); exists { - ctx, _ = logging.InjectLoggerValues(ctx, "gvr", gvr.String()) + if gvk, exists := m.gvkMapping.LookupByT1(ftcName); exists { + ctx, _ = logging.InjectLoggerValues(ctx, "gvk", gvk.String()) } return m.processFTCDeletionUnlocked(ctx, ftcName) @@ -243,8 +255,9 @@ func (m *informerManager) processFTCDeletionUnlocked(ctx context.Context, ftcNam cancel() } - m.gvrMapping.DeleteT1(ftcName) + m.gvkMapping.DeleteT1(ftcName) + delete(m.lastObservedFTCs, ftcName) delete(m.informers, ftcName) delete(m.informerCancelFuncs, ftcName) delete(m.eventHandlerRegistrations, ftcName) @@ -269,12 +282,12 @@ func (m *informerManager) GetFederatedTypeConfigLister() fedcorev1a1listers.Fede } func (m *informerManager) GetResourceLister( - gvr schema.GroupVersionResource, + gvk schema.GroupVersionKind, ) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) { m.lock.RLock() defer m.lock.RUnlock() - ftc, ok := m.gvrMapping.LookupByT2(gvr) + ftc, ok := m.gvkMapping.LookupByT2(gvk) if !ok { return nil, nil, false } @@ -287,6 +300,23 @@ func (m *informerManager) GetResourceLister( return informer.Lister(), informer.Informer().HasSynced, true } +func (m *informerManager) GetResourceFTC(gvk schema.GroupVersionKind) (*fedcorev1a1.FederatedTypeConfig, bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + ftcName, ok := m.gvkMapping.LookupByT2(gvk) + if !ok { + return nil, false + } + + ftc := m.lastObservedFTCs[ftcName] + if ftc == nil { + return nil, false + } + + return ftc, true +} + func (m *informerManager) HasSynced() bool { return m.ftcInformer.Informer().HasSynced() } diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index e4cf4ae0..5897b238 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -66,10 +66,10 @@ func TestInformerManager(t *testing.T) { // 2. 
Verify that the listers for each FTC is eventually available for _, ftc := range defaultFTCs { - gvr := ftc.GetSourceTypeGVR() + gvk := ftc.GetSourceTypeGVK() g.Eventually(func(g gomega.Gomega) { - lister, informerSynced, exists := manager.GetResourceLister(gvr) + lister, informerSynced, exists := manager.GetResourceLister(gvk) g.Expect(exists).To(gomega.BeTrue()) g.Expect(lister).ToNot(gomega.BeNil()) g.Expect(informerSynced()).To(gomega.BeTrue()) @@ -78,7 +78,7 @@ func TestInformerManager(t *testing.T) { // 3. Verify that the lister for a non-existent FTC is not available - lister, informerSynced, exists := manager.GetResourceLister(common.DaemonSetGVR) + lister, informerSynced, exists := manager.GetResourceLister(daemonsetGVK) g.Expect(exists).To(gomega.BeFalse()) g.Expect(lister).To(gomega.BeNil()) g.Expect(informerSynced).To(gomega.BeNil()) @@ -102,12 +102,12 @@ func TestInformerManager(t *testing.T) { }() ftc := daemonsetFTC - gvr := ftc.GetSourceTypeGVR() + gvk := ftc.GetSourceTypeGVK() // 2. Verify that the lister for daemonsets is not available at the start g.Consistently(func(g gomega.Gomega) { - lister, informerSynced, exists := manager.GetResourceLister(gvr) + lister, informerSynced, exists := manager.GetResourceLister(gvk) g.Expect(exists).To(gomega.BeFalse()) g.Expect(lister).To(gomega.BeNil()) g.Expect(informerSynced).To(gomega.BeNil()) @@ -121,7 +121,7 @@ func TestInformerManager(t *testing.T) { // 4. Verify that the lister for daemonsets is eventually available g.Eventually(func(g gomega.Gomega) { - lister, informerSynced, exists := manager.GetResourceLister(gvr) + lister, informerSynced, exists := manager.GetResourceLister(gvk) g.Expect(exists).To(gomega.BeTrue()) g.Expect(lister).ToNot(gomega.BeNil()) g.Expect(informerSynced()).To(gomega.BeTrue()) @@ -775,7 +775,7 @@ func bootstrapInformerManagerWithFakeClients( fedClient := fake.NewSimpleClientset(fedObjects...) factory := fedinformers.NewSharedInformerFactory(fedClient, 0) - informerManager := NewInformerManager(dynamicClient, factory.Core().V1alpha1().FederatedTypeConfigs()) + informerManager := NewInformerManager(dynamicClient, factory.Core().V1alpha1().FederatedTypeConfigs(), nil) for _, generator := range eventHandlerGenerators { err := informerManager.AddEventHandlerGenerator(generator) diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index d9d15910..88bc1c15 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -50,7 +50,9 @@ type InformerManager interface { AddEventHandlerGenerator(generator *EventHandlerGenerator) error // Returns a lister for the given GroupResourceVersion if it exists. The lister for each FTC's source type will // eventually exist. - GetResourceLister(gvr schema.GroupVersionResource) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) + GetResourceLister(gvk schema.GroupVersionKind) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) + // Returns the known FTC mapping for the given GVK if it exists. + GetResourceFTC(gvk schema.GroupVersionKind) (ftc *fedcorev1a1.FederatedTypeConfig, exists bool) // Returns the FederatedTypeConfig lister used by the InformerManager. GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister @@ -93,7 +95,7 @@ type FederatedInformerManager interface { // Returns a lister for the given GroupResourceVersion and cluster if it exists. The lister for each FTC's source // type and cluster will eventually exist. 
GetResourceLister( - gvr schema.GroupVersionResource, + gvk schema.GroupVersionKind, cluster string, ) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) // Returns a client for the given cluster if it exists. The client for each cluster will eventually exist. diff --git a/pkg/util/informermanager/testutils_test.go b/pkg/util/informermanager/testutils_test.go index 044b6579..08a4df11 100644 --- a/pkg/util/informermanager/testutils_test.go +++ b/pkg/util/informermanager/testutils_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/tools/cache" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" ) var ( @@ -112,6 +113,9 @@ func getTestDeployment(name, namespace string) *unstructured.Unstructured { ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, + Labels: map[string]string{ + managedlabel.ManagedByKubeAdmiralLabelKey: managedlabel.ManagedByKubeAdmiralLabelValue, + }, }, } @@ -132,6 +136,9 @@ func getTestConfigMap(name, namespace string) *unstructured.Unstructured { ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, + Labels: map[string]string{ + managedlabel.ManagedByKubeAdmiralLabelKey: managedlabel.ManagedByKubeAdmiralLabelValue, + }, }, } @@ -152,6 +159,9 @@ func getTestSecret(name, namespace string) *unstructured.Unstructured { ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, + Labels: map[string]string{ + managedlabel.ManagedByKubeAdmiralLabelKey: managedlabel.ManagedByKubeAdmiralLabelValue, + }, }, } @@ -172,6 +182,9 @@ func getTestDaemonSet(name, namespace string) *unstructured.Unstructured { ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, + Labels: map[string]string{ + managedlabel.ManagedByKubeAdmiralLabelKey: managedlabel.ManagedByKubeAdmiralLabelValue, + }, }, } diff --git a/pkg/controllers/util/managedlabel/managedlabel.go b/pkg/util/managedlabel/managedlabel.go similarity index 100% rename from pkg/controllers/util/managedlabel/managedlabel.go rename to pkg/util/managedlabel/managedlabel.go From a055a5975dd5864d2c407dd34c4bd6f943ae15d4 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 13 Jul 2023 20:04:59 +0800 Subject: [PATCH 035/173] add tests for gvk mapping --- .../informermanager/informermanager_test.go | 146 ++++++++++++++++-- 1 file changed, 137 insertions(+), 9 deletions(-) diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go index 5897b238..8e1cb36d 100644 --- a/pkg/util/informermanager/informermanager_test.go +++ b/pkg/util/informermanager/informermanager_test.go @@ -46,6 +46,84 @@ func TestInformerManager(t *testing.T) { t.Parallel() ctx := klog.NewContext(context.Background(), ktesting.NewLogger(t, ktesting.NewConfig(ktesting.Verbosity(2)))) + t.Run("GVK mappings for existing FTCs should be available eventually", func(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + + // 1. Bootstrap environment + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} + defaultObjs := []*unstructured.Unstructured{} + generators := []*EventHandlerGenerator{} + + ctx, cancel := context.WithCancel(ctx) + manager, _, _ := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() + + // 2. 
Verify that the GVK mapping for each FTC is eventually available + + for _, ftc := range defaultFTCs { + gvk := ftc.GetSourceTypeGVK() + + g.Eventually(func(g gomega.Gomega) { + resourceFTC, exists := manager.GetResourceFTC(gvk) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(resourceFTC).To(gomega.Equal(ftc)) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + } + + // 3. Verify that the GVK mapping for a non-existent FTC is not available + + ftc, exists := manager.GetResourceFTC(daemonsetGVK) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(ftc).To(gomega.BeNil()) + }) + + t.Run("GVK mapping for new FTC should be available eventually", func(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + + // 1. Bootstrap environment + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} + defaultObjs := []*unstructured.Unstructured{} + generators := []*EventHandlerGenerator{} + + ctx, cancel := context.WithCancel(ctx) + manager, _, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() + + ftc := daemonsetFTC + gvk := ftc.GetSourceTypeGVK() + + // 2. Verify that the GVK mapping for daemonsets is not available at the start + + g.Consistently(func(g gomega.Gomega) { + resourceFTC, exists := manager.GetResourceFTC(gvk) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(resourceFTC).To(gomega.BeNil()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + // 3. Create the daemonset FTC. + + _, err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Create(ctx, ftc, metav1.CreateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // 4. Verify that the GVK mapping for daemonsets is eventually available + + g.Eventually(func(g gomega.Gomega) { + resourceFTC, exists := manager.GetResourceFTC(gvk) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(resourceFTC).To(gomega.Equal(ftc)) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + }) + t.Run("listers for existing FTCs should be available eventually", func(t *testing.T) { t.Parallel() g := gomega.NewWithT(t) @@ -155,7 +233,13 @@ func TestInformerManager(t *testing.T) { } ctx, cancel := context.WithCancel(ctx) - manager, dynamicClient, _ := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, dynamicClient, _ := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) @@ -246,7 +330,13 @@ func TestInformerManager(t *testing.T) { } ctx, cancel := context.WithCancel(ctx) - manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) @@ -381,7 +471,13 @@ func TestInformerManager(t *testing.T) { generators := []*EventHandlerGenerator{generator} ctx, cancel := context.WithCancel(ctx) - manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( + g, + 
ctx, + defaultFTCs, + defaultObjs, + generators, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) @@ -434,7 +530,13 @@ func TestInformerManager(t *testing.T) { generators := []*EventHandlerGenerator{generator} ctx, cancel := context.WithCancel(ctx) - manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) @@ -486,7 +588,13 @@ func TestInformerManager(t *testing.T) { generators := []*EventHandlerGenerator{generator} ctx, cancel := context.WithCancel(ctx) - manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) @@ -539,7 +647,13 @@ func TestInformerManager(t *testing.T) { generators := []*EventHandlerGenerator{generator} ctx, cancel := context.WithCancel(ctx) - manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) @@ -598,7 +712,13 @@ func TestInformerManager(t *testing.T) { generators := []*EventHandlerGenerator{generator1, generator2} ctx, cancel := context.WithCancel(ctx) - manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) @@ -622,7 +742,9 @@ func TestInformerManager(t *testing.T) { // 3. Delete the deployment FTC - err := fedClient.CoreV1alpha1().FederatedTypeConfigs().Delete(ctx, deploymentFTC.GetName(), metav1.DeleteOptions{}) + err := fedClient.CoreV1alpha1(). + FederatedTypeConfigs(). 
+ Delete(ctx, deploymentFTC.GetName(), metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) <-time.After(time.Second) @@ -691,7 +813,13 @@ func TestInformerManager(t *testing.T) { managerCtx, managerCancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx) - manager, dynamicClient, _ := bootstrapInformerManagerWithFakeClients(g, managerCtx, defaultFTCs, defaultObjs, generators) + manager, dynamicClient, _ := bootstrapInformerManagerWithFakeClients( + g, + managerCtx, + defaultFTCs, + defaultObjs, + generators, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) From 7ce0e2e2b4865ea9213ca15da83223866ff07101 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 14 Jul 2023 10:37:06 +0800 Subject: [PATCH 036/173] remove old function definitions --- pkg/controllers/util/federatedinformer.go | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/pkg/controllers/util/federatedinformer.go b/pkg/controllers/util/federatedinformer.go index 95c28928..a57ce03b 100644 --- a/pkg/controllers/util/federatedinformer.go +++ b/pkg/controllers/util/federatedinformer.go @@ -289,28 +289,6 @@ func NewFederatedInformer( return federatedInformer, err } -func IsClusterReady(clusterStatus *fedcorev1a1.FederatedClusterStatus) bool { - for _, condition := range clusterStatus.Conditions { - if condition.Type == fedcorev1a1.ClusterReady { - if condition.Status == corev1.ConditionTrue { - return true - } - } - } - return false -} - -func IsClusterJoined(clusterStatus *fedcorev1a1.FederatedClusterStatus) bool { - for _, condition := range clusterStatus.Conditions { - if condition.Type == fedcorev1a1.ClusterJoined { - if condition.Status == corev1.ConditionTrue { - return true - } - } - } - return false -} - type informer struct { controller cache.Controller store cache.Store From e91a2fb03a52b048f0f66a783064fb5de782384b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 14 Jul 2023 03:44:40 +0000 Subject: [PATCH 037/173] refactor(api): unify the go structs for (Cluster)?FederatedObject --- hack/generate-groups.sh | 3 ++ .../core/v1alpha1/types_federatedobject.go | 38 +++++++------------ .../core/v1alpha1/zz_generated.deepcopy.go | 20 ++++++++++ 3 files changed, 37 insertions(+), 24 deletions(-) diff --git a/hack/generate-groups.sh b/hack/generate-groups.sh index 5d0fd12a..f6e8a9a6 100644 --- a/hack/generate-groups.sh +++ b/hack/generate-groups.sh @@ -71,6 +71,9 @@ for patch_file in config/crds/patches/*.sh; do PATH="$GOBIN:$PATH" bash $patch_file $crd_file done +# remove the CRD for GenericFederatedObject. +# It's not needed and there's no way to suppress its generation. 
+rm -v config/crds/core.kubeadmiral.io_genericfederatedobjects.yaml || true # generate deepcopy echo "Generating deepcopy funcs" diff --git a/pkg/apis/core/v1alpha1/types_federatedobject.go b/pkg/apis/core/v1alpha1/types_federatedobject.go index 2ceb348c..60078f16 100644 --- a/pkg/apis/core/v1alpha1/types_federatedobject.go +++ b/pkg/apis/core/v1alpha1/types_federatedobject.go @@ -22,16 +22,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:validation:Required -// +kubebuilder:resource:path=federatedobjects,shortName=fo,singular=federatedobject -// +kubebuilder:subresource:status -// +kubebuilder:object:root=true - -// FederatedObject describes a namespace-scoped Kubernetes object and how it should be propagated to different member -// clusters. -type FederatedObject struct { +// Schema shared by both FederatedObject and ClusterFederatedObject. +type GenericFederatedObject struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -46,6 +38,17 @@ type FederatedObject struct { Status GenericFederatedObjectStatus `json:"status,omitempty"` } +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:validation:Required +// +kubebuilder:resource:path=federatedobjects,shortName=fo,singular=federatedobject +// +kubebuilder:subresource:status +// +kubebuilder:object:root=true + +// FederatedObject describes a namespace-scoped Kubernetes object and how it should be propagated to different member +// clusters. +type FederatedObject GenericFederatedObject + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true @@ -66,20 +69,7 @@ type FederatedObjectList struct { // ClusterFederatedObject describes a cluster-scoped Kubernetes object and how it should be propagated to different // member clusters. -type ClusterFederatedObject struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired behavior of the ClusterFederatedObject. - Spec GenericFederatedObjectSpec `json:"spec"` - - // Status describes the most recently observed status of the ClusterFederatedObject. - // +optional - Status GenericFederatedObjectStatus `json:"status,omitempty"` -} +type ClusterFederatedObject GenericFederatedObject // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go index 1226a6c0..2f09791b 100644 --- a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -915,6 +915,26 @@ func (in *GenericCollectedStatus) DeepCopy() *GenericCollectedStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
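// For illustration only (not part of the patch series): because
// FederatedObject and ClusterFederatedObject are now defined as named types of
// GenericFederatedObject, their pointers convert directly, which is what lets
// shared code handle both scopes without reflection or deep copies:
//
//	fo := &fedcorev1a1.FederatedObject{}
//	generic := (*fedcorev1a1.GenericFederatedObject)(fo) // no copy; same underlying memory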
+func (in *GenericFederatedObject) DeepCopyInto(out *GenericFederatedObject) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFederatedObject. +func (in *GenericFederatedObject) DeepCopy() *GenericFederatedObject { + if in == nil { + return nil + } + out := new(GenericFederatedObject) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericFederatedObjectCondition) DeepCopyInto(out *GenericFederatedObjectCondition) { *out = *in From d1e4fe36990c5cf24d0d724ef5ed0fae9f60c8a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 14 Jul 2023 09:15:12 +0000 Subject: [PATCH 038/173] feat: GenericFederatedObject implements runtime.Object --- pkg/apis/core/v1alpha1/types_federatedobject.go | 1 + pkg/apis/core/v1alpha1/zz_generated.deepcopy.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/pkg/apis/core/v1alpha1/types_federatedobject.go b/pkg/apis/core/v1alpha1/types_federatedobject.go index 60078f16..62fa2a58 100644 --- a/pkg/apis/core/v1alpha1/types_federatedobject.go +++ b/pkg/apis/core/v1alpha1/types_federatedobject.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Schema shared by both FederatedObject and ClusterFederatedObject. type GenericFederatedObject struct { metav1.TypeMeta `json:",inline"` diff --git a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go index 2f09791b..250629be 100644 --- a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -935,6 +935,14 @@ func (in *GenericFederatedObject) DeepCopy() *GenericFederatedObject { return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GenericFederatedObject) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
 func (in *GenericFederatedObjectCondition) DeepCopyInto(out *GenericFederatedObjectCondition) {
 	*out = *in

From b7f430006d71c95f355c1d0874f90dd42f023481 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?=
Date: Fri, 14 Jul 2023 09:15:57 +0000
Subject: [PATCH 039/173] feat: federated adapters

---
 pkg/util/fedobjectadapters/adapters.go | 97 ++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100644 pkg/util/fedobjectadapters/adapters.go

diff --git a/pkg/util/fedobjectadapters/adapters.go b/pkg/util/fedobjectadapters/adapters.go
new file mode 100644
index 00000000..583dce3e
--- /dev/null
+++ b/pkg/util/fedobjectadapters/adapters.go
@@ -0,0 +1,97 @@
+package fedobjectadapters
+
+import (
+	"context"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
+	fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1"
+	fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1"
+)
+
+func convertToGeneric[Type *fedcorev1a1.ClusterFederatedObject | *fedcorev1a1.FederatedObject](
+	obj Type, err error,
+) (*fedcorev1a1.GenericFederatedObject, error) {
+	return (*fedcorev1a1.GenericFederatedObject)(obj), err
+}
+
+func GetFromLister(
+	fedObjectLister fedcorev1a1listers.FederatedObjectLister,
+	clusterFedObjectLister fedcorev1a1listers.ClusterFederatedObjectLister,
+	namespace, name string,
+) (*fedcorev1a1.GenericFederatedObject, error) {
+	if namespace == "" {
+		return convertToGeneric(clusterFedObjectLister.Get(name))
+	} else {
+		return convertToGeneric(fedObjectLister.FederatedObjects(namespace).Get(name))
+	}
+}
+
+func Create(
+	ctx context.Context,
+	fedObjectClient fedcorev1a1client.FederatedObjectsGetter,
+	clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter,
+	obj *fedcorev1a1.GenericFederatedObject,
+	opts metav1.CreateOptions,
+) (*fedcorev1a1.GenericFederatedObject, error) {
+	if obj.GetNamespace() == "" {
+		return convertToGeneric(
+			clusterFedObjectClient.ClusterFederatedObjects().Create(ctx, (*fedcorev1a1.ClusterFederatedObject)(obj), opts),
+		)
+	} else {
+		return convertToGeneric(
+			fedObjectClient.FederatedObjects(obj.GetNamespace()).Create(ctx, (*fedcorev1a1.FederatedObject)(obj), opts),
+		)
+	}
+}
+
+func Update(
+	ctx context.Context,
+	fedObjectClient fedcorev1a1client.FederatedObjectsGetter,
+	clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter,
+	obj *fedcorev1a1.GenericFederatedObject,
+	opts metav1.UpdateOptions,
+) (*fedcorev1a1.GenericFederatedObject, error) {
+	if obj.GetNamespace() == "" {
+		return convertToGeneric(
+			clusterFedObjectClient.ClusterFederatedObjects().Update(ctx, (*fedcorev1a1.ClusterFederatedObject)(obj), opts),
+		)
+	} else {
+		return convertToGeneric(
+			fedObjectClient.FederatedObjects(obj.GetNamespace()).Update(ctx, (*fedcorev1a1.FederatedObject)(obj), opts),
+		)
+	}
+}
+
+func UpdateStatus(
+	ctx context.Context,
+	fedObjectClient fedcorev1a1client.FederatedObjectsGetter,
+	clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter,
+	obj *fedcorev1a1.GenericFederatedObject,
+	opts metav1.UpdateOptions,
+) (*fedcorev1a1.GenericFederatedObject, error) {
+	if obj.GetNamespace() == "" {
+		return convertToGeneric(
+			clusterFedObjectClient.ClusterFederatedObjects().UpdateStatus(ctx, (*fedcorev1a1.ClusterFederatedObject)(obj), opts),
+		)
+	} else {
+		return convertToGeneric(
fedObjectClient.FederatedObjects(obj.GetNamespace()).UpdateStatus(ctx, (*fedcorev1a1.FederatedObject)(obj), opts), + ) + } +} + +func Delete( + ctx context.Context, + fedObjectClient fedcorev1a1client.FederatedObjectsGetter, + clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter, + namespace, name string, + opts metav1.DeleteOptions, +) error { + if namespace == "" { + return clusterFedObjectClient.ClusterFederatedObjects().Delete(ctx, name, opts) + } else { + return fedObjectClient.FederatedObjects(namespace).Delete(ctx, name, opts) + } +} From 8752aad0cd8f663126769dab34ae10c8e182124e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 14 Jul 2023 09:23:14 +0000 Subject: [PATCH 040/173] feat: util to get GVK from federated object --- .../v1alpha1/extensions_federatedobject.go | 27 ++++++++++++++----- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/pkg/apis/core/v1alpha1/extensions_federatedobject.go b/pkg/apis/core/v1alpha1/extensions_federatedobject.go index af48d8f5..d62ab2f1 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedobject.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedobject.go @@ -17,17 +17,19 @@ limitations under the License. package v1alpha1 import ( + "encoding/json" "reflect" "sort" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" ) // Placement extensions -// GetPlacementUnion returns the union of all clusters listed under the Placement field of the FederatedObject. -func (o *FederatedObject) GetPlacementUnion() sets.Set[string] { +// GetPlacementUnion returns the union of all clusters listed under the Placement field of the GenericFederatedObject. +func (o *GenericFederatedObject) GetPlacementUnion() sets.Set[string] { set := sets.New[string]() for _, placement := range o.Spec.Placements { for _, cluster := range placement.Placement { @@ -39,7 +41,7 @@ func (o *FederatedObject) GetPlacementUnion() sets.Set[string] { // GetControllerPlacement returns the slice containing all the ClusterPlacements from a given controller. Returns nil if // the controller is not present. -func (o *FederatedObject) GetControllerPlacement(controller string) []ClusterReference { +func (o *GenericFederatedObject) GetControllerPlacement(controller string) []ClusterReference { for _, placement := range o.Spec.Placements { if placement.Controller == controller { return placement.Placement @@ -49,8 +51,8 @@ func (o *FederatedObject) GetControllerPlacement(controller string) []ClusterRef } // SetControllerPlacement sets the ClusterPlacements for a given controller. If clusterNames is nil or empty, the previous -// placement for the given controller will be deleted. Returns a bool indicating if the FederatedObject has changed. -func (o *FederatedObject) SetControllerPlacement(controller string, clusterNames []string) bool { +// placement for the given controller will be deleted. Returns a bool indicating if the GenericFederatedObject has changed. 
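// For illustration only (not part of the patch series): the fedobjectadapters
// helpers above give callers one code path for both namespaced and
// cluster-scoped federated objects. A hedged sketch, assuming clients and
// listers from the generated clientset are already in scope:
//
//	obj, err := fedobjectadapters.GetFromLister(fedObjectLister, clusterFedObjectLister, namespace, name)
//	if err != nil {
//		return err
//	}
//	obj.SetControllerPlacement("example-controller", []string{"cluster-1"})
//	_, err = fedobjectadapters.Update(ctx, fedClient.CoreV1alpha1(), fedClient.CoreV1alpha1(), obj, metav1.UpdateOptions{})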
+func (o *GenericFederatedObject) SetControllerPlacement(controller string, clusterNames []string) bool {
 	if len(clusterNames) == 0 {
 		return o.DeleteControllerPlacement(controller)
 	}
@@ -88,9 +90,9 @@ func (o *FederatedObject) SetControllerPlacement(controller string, clusterNames
 	return false
 }
 
-// DeleteClusterPlacement deletes a controller's placement, returning a bool to indicate if the FederatedObject has
+// DeleteControllerPlacement deletes a controller's placement, returning a bool to indicate if the GenericFederatedObject has
 // changed.
-func (o *FederatedObject) DeleteControllerPlacement(controller string) bool {
+func (o *GenericFederatedObject) DeleteControllerPlacement(controller string) bool {
 	oldPlacementIdx := -1
 	for i := range o.Spec.Placements {
 		if o.Spec.Placements[i].Controller == controller {
@@ -107,6 +109,17 @@ func (o *FederatedObject) DeleteControllerPlacement(controller string) bool {
 	return true
 }
 
+func (o *GenericFederatedObject) GetTemplateGVK() (schema.GroupVersionKind, error) {
+	type partialTypeMetadata struct {
+		metav1.TypeMeta `json:",inline"`
+	}
+	metadata := &partialTypeMetadata{}
+	if err := json.Unmarshal(o.Spec.Template.Raw, metadata); err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	return metadata.GroupVersionKind(), nil
+}
+
 // Follower extensions
 
 func (l *LeaderReference) GroupKind() schema.GroupKind {
From ab463c4db4d25ece10a58734cfce6443ba3a4564 Mon Sep 17 00:00:00 2001
From: "Liu Guangyuan (Gary)" 
Date: Thu, 13 Jul 2023 14:29:44 +0800
Subject: [PATCH 041/173] Merge branch 'main' into fix/ergonomic-annotation-handling

---
 pkg/controllers/util/annotation/annotation.go | 43 ++++++-------
 .../util/annotation/annotation_test.go        | 12 +++---
 pkg/controllers/util/sourcefeedback/util.go   |  4 +-
 3 files changed, 21 insertions(+), 38 deletions(-)

diff --git a/pkg/controllers/util/annotation/annotation.go b/pkg/controllers/util/annotation/annotation.go
index c2bcb2dd..108b7d60 100644
--- a/pkg/controllers/util/annotation/annotation.go
+++ b/pkg/controllers/util/annotation/annotation.go
@@ -20,26 +20,21 @@ import (
 	"fmt"
 	"reflect"
 
-	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/apimachinery/pkg/runtime"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 const (
-	SyncSuccessTimestamp = "syncSuccessTimestamp"
-	LastGeneration       = "lastGeneration"
+	SyncSuccessTimestamp      = "syncSuccessTimestamp"
+	LastGeneration            = "lastGeneration"
 	LastSyncSuccessGeneration = "lastSyncSuccessGeneration"
 )
 
 // HasAnnotationKey returns true if the given object has the given annotation key in its ObjectMeta.
-func HasAnnotationKey(obj runtime.Object, key string) (bool, error) {
+func HasAnnotationKey(obj metav1.Object, key string) (bool, error) {
 	if IsNilPointer(obj) {
 		return false, fmt.Errorf("object(%T) is nil pointer", obj)
 	}
-	accessor, err := meta.Accessor(obj)
-	if err != nil {
-		return false, err
-	}
-	annotations := accessor.GetAnnotations()
+	annotations := obj.GetAnnotations()
 	if annotations == nil {
 		return false, nil
 	}
@@ -48,15 +43,11 @@ func HasAnnotationKey(obj runtime.Object, key string) (bool, error)
 }
 
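// For illustration only (not part of the patch series): GetTemplateGVK above
// pairs naturally with InformerManager.GetResourceFTC from patch 034 when a
// controller must look up the FTC governing a federated object's payload
// (fedObject is assumed to be a *fedcorev1a1.GenericFederatedObject in scope):
//
//	gvk, err := fedObject.GetTemplateGVK()
//	if err != nil {
//		return err
//	}
//	ftc, exists := informerManager.GetResourceFTC(gvk)
//	if !exists {
//		return fmt.Errorf("no FederatedTypeConfig found for %s", gvk)
//	}

 // HasAnnotationKeyValue returns true if the given object has the given annotation key and value in its ObjectMeta.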
-func HasAnnotationKeyValue(obj runtime.Object, key, value string) (bool, error) { +func HasAnnotationKeyValue(obj metav1.Object, key, value string) (bool, error) { if IsNilPointer(obj) { return false, fmt.Errorf("object(%T) is nil pointer", obj) } - accessor, err := meta.Accessor(obj) - if err != nil { - return false, err - } - annotations := accessor.GetAnnotations() + annotations := obj.GetAnnotations() if annotations == nil { return false, nil } @@ -67,7 +58,7 @@ func HasAnnotationKeyValue(obj runtime.Object, key, value string) (bool, error) // AddAnnotation adds the given annotation key and value to the given objects ObjectMeta, // and overwrites the annotation value if it already exists. // Returns true if the object was updated. -func AddAnnotation(obj runtime.Object, key, value string) (bool, error) { +func AddAnnotation(obj metav1.Object, key, value string) (bool, error) { if IsNilPointer(obj) { return false, fmt.Errorf("object(%T) is nil pointer", obj) } @@ -76,10 +67,6 @@ func AddAnnotation(obj runtime.Object, key, value string) (bool, error) { return false, fmt.Errorf("key is a empty string.") } - accessor, err := meta.Accessor(obj) - if err != nil { - return false, err - } has, err := HasAnnotationKeyValue(obj, key, value) if has && err == nil { return false, nil @@ -87,25 +74,21 @@ func AddAnnotation(obj runtime.Object, key, value string) (bool, error) { if err != nil { return false, err } - annotations := accessor.GetAnnotations() + annotations := obj.GetAnnotations() if annotations == nil { annotations = make(map[string]string) } annotations[key] = value - accessor.SetAnnotations(annotations) + obj.SetAnnotations(annotations) return true, nil } // RemoveAnnotation removes the given annotation key from the given objects ObjectMeta. // Returns true if the object was updated. 
-func RemoveAnnotation(obj runtime.Object, key string) (bool, error) { +func RemoveAnnotation(obj metav1.Object, key string) (bool, error) { if IsNilPointer(obj) { return false, fmt.Errorf("object(%T) is nil pointer", obj) } - accessor, err := meta.Accessor(obj) - if err != nil { - return false, err - } has, err := HasAnnotationKey(obj, key) if !has && err == nil { return false, nil @@ -114,13 +97,13 @@ func RemoveAnnotation(obj runtime.Object, key string) (bool, error) { return false, err } - annotations := accessor.GetAnnotations() + annotations := obj.GetAnnotations() if annotations == nil { return false, nil } delete(annotations, key) - accessor.SetAnnotations(annotations) + obj.SetAnnotations(annotations) return true, nil } diff --git a/pkg/controllers/util/annotation/annotation_test.go b/pkg/controllers/util/annotation/annotation_test.go index 4dd87d0f..7e8c59d4 100644 --- a/pkg/controllers/util/annotation/annotation_test.go +++ b/pkg/controllers/util/annotation/annotation_test.go @@ -23,10 +23,10 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func newObj(annotation map[string]string) runtime.Object { +func newObj(annotation map[string]string) metav1.Object { pod := corev1.Pod{} pod.ObjectMeta.Annotations = annotation return &pod @@ -34,7 +34,7 @@ func newObj(annotation map[string]string) runtime.Object { func TestHasAnnotationKey(t *testing.T) { testCases := []struct { - obj runtime.Object + obj metav1.Object annotation string result bool }{ @@ -82,7 +82,7 @@ func TestHasAnnotationKey(t *testing.T) { func TestHasAnnotationKeyValue(t *testing.T) { testCases := []struct { - obj runtime.Object + obj metav1.Object key string value string result bool @@ -149,7 +149,7 @@ func TestHasAnnotationKeyValue(t *testing.T) { func TestAddAnnotation(t *testing.T) { testCases := []struct { - obj runtime.Object + obj metav1.Object key string value string isUpdated bool @@ -231,7 +231,7 @@ func TestAddAnnotation(t *testing.T) { func TestRemoveAnnotation(t *testing.T) { testCases := []struct { - obj runtime.Object + obj metav1.Object key string isUpdated bool newAnnotations map[string]string diff --git a/pkg/controllers/util/sourcefeedback/util.go b/pkg/controllers/util/sourcefeedback/util.go index 76ca2158..aeae5af8 100644 --- a/pkg/controllers/util/sourcefeedback/util.go +++ b/pkg/controllers/util/sourcefeedback/util.go @@ -19,13 +19,13 @@ package sourcefeedback import ( "encoding/json" - "k8s.io/apimachinery/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" ) -func setAnnotation(object runtime.Object, key string, value interface{}, hasChanged *bool) { +func setAnnotation(object metav1.Object, key string, value interface{}, hasChanged *bool) { jsonBuf, err := json.Marshal(value) if err != nil { klog.Errorf("Cannot marshal JSON: %v", err) From 9d43262d4e3c71a013db0b14f1f427cfd3ca7e4c Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Mon, 17 Jul 2023 15:49:15 +0800 Subject: [PATCH 042/173] refactor: pendingcontrollers package --- .../util/pendingcontrollers/pendingcontrollers.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) rename pkg/{controllers => }/util/pendingcontrollers/pendingcontrollers.go (91%) diff --git a/pkg/controllers/util/pendingcontrollers/pendingcontrollers.go b/pkg/util/pendingcontrollers/pendingcontrollers.go 
similarity index 91% rename from pkg/controllers/util/pendingcontrollers/pendingcontrollers.go rename to pkg/util/pendingcontrollers/pendingcontrollers.go index 1c3b799a..a2f02841 100644 --- a/pkg/controllers/util/pendingcontrollers/pendingcontrollers.go +++ b/pkg/util/pendingcontrollers/pendingcontrollers.go @@ -20,8 +20,7 @@ import ( "encoding/json" "fmt" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" ) @@ -32,7 +31,7 @@ const ( PendingControllersAnnotation = common.DefaultPrefix + "pending-controllers" ) -func GetPendingControllers(fedObject *unstructured.Unstructured) (PendingControllers, error) { +func GetPendingControllers(fedObject *fedcorev1a1.GenericFederatedObject) (PendingControllers, error) { value, exists := fedObject.GetAnnotations()[PendingControllersAnnotation] if !exists { return nil, fmt.Errorf("annotation %v does not exist", PendingControllersAnnotation) @@ -64,7 +63,7 @@ func NormalizeControllers(controllers PendingControllers) PendingControllers { } func SetPendingControllers( - fedObject *unstructured.Unstructured, + fedObject *fedcorev1a1.GenericFederatedObject, controllers PendingControllers, ) (updated bool, err error) { controllers = NormalizeControllers(controllers) @@ -91,7 +90,7 @@ func getDownstreamControllers(allControllers PendingControllers, current string) } func UpdatePendingControllers( - fedObject *unstructured.Unstructured, + fedObject *fedcorev1a1.GenericFederatedObject, toRemove string, shouldSetDownstream bool, allControllers PendingControllers, @@ -125,7 +124,7 @@ func UpdatePendingControllers( return SetPendingControllers(fedObject, newPendingControllers) } -func ControllerDependenciesFulfilled(fedObject *unstructured.Unstructured, controllerName string) (bool, error) { +func ControllerDependenciesFulfilled(fedObject *fedcorev1a1.GenericFederatedObject, controllerName string) (bool, error) { pendingControllers, err := GetPendingControllers(fedObject) if err != nil { return false, err From 27258de0ea7e195a63c696cfcc649e90ca8e828c Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Mon, 17 Jul 2023 15:56:48 +0800 Subject: [PATCH 043/173] refactor: move annotation utils --- pkg/{controllers => }/util/annotation/annotation.go | 0 pkg/{controllers => }/util/annotation/annotation_test.go | 0 pkg/{controllers => }/util/annotation/submap.go | 0 pkg/{controllers => }/util/annotation/submap_test.go | 0 pkg/util/pendingcontrollers/pendingcontrollers.go | 2 +- 5 files changed, 1 insertion(+), 1 deletion(-) rename pkg/{controllers => }/util/annotation/annotation.go (100%) rename pkg/{controllers => }/util/annotation/annotation_test.go (100%) rename pkg/{controllers => }/util/annotation/submap.go (100%) rename pkg/{controllers => }/util/annotation/submap_test.go (100%) diff --git a/pkg/controllers/util/annotation/annotation.go b/pkg/util/annotation/annotation.go similarity index 100% rename from pkg/controllers/util/annotation/annotation.go rename to pkg/util/annotation/annotation.go diff --git a/pkg/controllers/util/annotation/annotation_test.go b/pkg/util/annotation/annotation_test.go similarity index 100% rename from pkg/controllers/util/annotation/annotation_test.go rename to pkg/util/annotation/annotation_test.go diff --git a/pkg/controllers/util/annotation/submap.go b/pkg/util/annotation/submap.go similarity index 100% rename from 
pkg/controllers/util/annotation/submap.go rename to pkg/util/annotation/submap.go diff --git a/pkg/controllers/util/annotation/submap_test.go b/pkg/util/annotation/submap_test.go similarity index 100% rename from pkg/controllers/util/annotation/submap_test.go rename to pkg/util/annotation/submap_test.go diff --git a/pkg/util/pendingcontrollers/pendingcontrollers.go b/pkg/util/pendingcontrollers/pendingcontrollers.go index a2f02841..98df5d84 100644 --- a/pkg/util/pendingcontrollers/pendingcontrollers.go +++ b/pkg/util/pendingcontrollers/pendingcontrollers.go @@ -22,7 +22,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" + annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" ) type PendingControllers [][]string From 49baa42db7525badba9ce6270ab922597e146a1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 17 Jul 2023 12:37:13 +0000 Subject: [PATCH 044/173] refactor: GenericFederatedObject interface --- ...ubeadmiral.io_clusterfederatedobjects.yaml | 4 +- hack/generate-groups.sh | 9 ++- .../v1alpha1/extensions_federatedobject.go | 65 +++++++++++---- pkg/apis/core/v1alpha1/interface.go | 7 ++ .../core/v1alpha1/types_federatedobject.go | 37 +++++---- .../core/v1alpha1/zz_generated.deepcopy.go | 28 ------- pkg/util/fedobjectadapters/adapters.go | 80 +++++++++++++------ .../pendingcontrollers/pendingcontrollers.go | 8 +- 8 files changed, 144 insertions(+), 94 deletions(-) diff --git a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml index 33dade49..c89902e0 100644 --- a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml +++ b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml @@ -36,7 +36,7 @@ spec: metadata: type: object spec: - description: Spec defines the desired behavior of the ClusterFederatedObject. + description: Spec defines the desired behavior of the FederatedObject. properties: follows: description: Follows defines other objects, or "leaders", that the @@ -150,7 +150,7 @@ spec: type: object status: description: Status describes the most recently observed status of the - ClusterFederatedObject. + FederatedObject. properties: clusters: description: Clusters contains the propagation status of the Kubernetes diff --git a/hack/generate-groups.sh b/hack/generate-groups.sh index f6e8a9a6..a11d2df2 100644 --- a/hack/generate-groups.sh +++ b/hack/generate-groups.sh @@ -52,7 +52,11 @@ for group in "${groups[@]}"; do done # generate code -function codegen::join() { local IFS="$1"; shift; echo "$*"; } +function codegen::join() { + local IFS="$1" + shift + echo "$*" +} # generate manifests echo "Generating manifests" @@ -71,9 +75,6 @@ for patch_file in config/crds/patches/*.sh; do PATH="$GOBIN:$PATH" bash $patch_file $crd_file done -# remove the CRD for GenericFederatedObject. -# It's not needed and there's no way to suppress its generation. 
-rm -v config/crds/core.kubeadmiral.io_genericfederatedobjects.yaml || true # generate deepcopy echo "Generating deepcopy funcs" diff --git a/pkg/apis/core/v1alpha1/extensions_federatedobject.go b/pkg/apis/core/v1alpha1/extensions_federatedobject.go index d62ab2f1..a7c3646f 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedobject.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedobject.go @@ -22,16 +22,39 @@ import ( "sort" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" ) +// Implementations for GenericFederatedObject + +func (o *FederatedObject) GetSpec() *GenericFederatedObjectSpec { + return &o.Spec +} + +func (o *FederatedObject) GetStatus() *GenericFederatedObjectStatus { + return &o.Status +} + +var _ GenericFederatedObject = &FederatedObject{} + +func (o *ClusterFederatedObject) GetSpec() *GenericFederatedObjectSpec { + return &o.Spec +} + +func (o *ClusterFederatedObject) GetStatus() *GenericFederatedObjectStatus { + return &o.Status +} + +var _ GenericFederatedObject = &ClusterFederatedObject{} + // Placement extensions // GetPlacementUnion returns the union of all clusters listed under the Placement field of the GenericFederatedObject. -func (o *GenericFederatedObject) GetPlacementUnion() sets.Set[string] { +func (spec *GenericFederatedObjectSpec) GetPlacementUnion() sets.Set[string] { set := sets.New[string]() - for _, placement := range o.Spec.Placements { + for _, placement := range spec.Placements { for _, cluster := range placement.Placement { set.Insert(cluster.Cluster) } @@ -41,8 +64,8 @@ func (o *GenericFederatedObject) GetPlacementUnion() sets.Set[string] { // GetControllerPlacement returns the slice containing all the ClusterPlacements from a given controller. Returns nil if // the controller is not present. -func (o *GenericFederatedObject) GetControllerPlacement(controller string) []ClusterReference { - for _, placement := range o.Spec.Placements { +func (spec *GenericFederatedObjectSpec) GetControllerPlacement(controller string) []ClusterReference { + for _, placement := range spec.Placements { if placement.Controller == controller { return placement.Placement } @@ -52,9 +75,9 @@ func (o *GenericFederatedObject) GetControllerPlacement(controller string) []Clu // SetControllerPlacement sets the ClusterPlacements for a given controller. If clusterNames is nil or empty, the previous // placement for the given controller will be deleted. Returns a bool indicating if the GenericFederatedObject has changed. 
-func (o *GenericFederatedObject) SetControllerPlacement(controller string, clusterNames []string) bool {
+func (spec *GenericFederatedObjectSpec) SetControllerPlacement(controller string, clusterNames []string) bool {
 	if len(clusterNames) == 0 {
-		return o.DeleteControllerPlacement(controller)
+		return spec.DeleteControllerPlacement(controller)
 	}
 
 	newPlacement := make([]ClusterReference, len(clusterNames))
@@ -67,8 +90,8 @@ func (o *GenericFederatedObject) SetControllerPlacement(controller string, clust
 	})
 
 	oldPlacementWithControllerIdx := -1
-	for i := range o.Spec.Placements {
-		if o.Spec.Placements[i].Controller == controller {
+	for i := range spec.Placements {
+		if spec.Placements[i].Controller == controller {
 			oldPlacementWithControllerIdx = i
 			break
 		}
@@ -79,11 +102,11 @@ func (o *GenericFederatedObject) SetControllerPlacement(controller string, clust
 		Placement: newPlacement,
 	}
 	if oldPlacementWithControllerIdx == -1 {
-		o.Spec.Placements = append(o.Spec.Placements, newPlacmentWithController)
+		spec.Placements = append(spec.Placements, newPlacmentWithController)
 		return true
 	}
-	if !reflect.DeepEqual(newPlacmentWithController, o.Spec.Placements[oldPlacementWithControllerIdx]) {
-		o.Spec.Placements[oldPlacementWithControllerIdx] = newPlacmentWithController
+	if !reflect.DeepEqual(newPlacmentWithController, spec.Placements[oldPlacementWithControllerIdx]) {
+		spec.Placements[oldPlacementWithControllerIdx] = newPlacmentWithController
 		return true
 	}
 
@@ -92,10 +115,10 @@ func (o *GenericFederatedObject) SetControllerPlacement(controller string, clust
 
 // DeleteControllerPlacement deletes a controller's placement, returning a bool to indicate if the GenericFederatedObject has
 // changed.
-func (o *GenericFederatedObject) DeleteControllerPlacement(controller string) bool {
+func (spec *GenericFederatedObjectSpec) DeleteControllerPlacement(controller string) bool {
 	oldPlacementIdx := -1
-	for i := range o.Spec.Placements {
-		if o.Spec.Placements[i].Controller == controller {
+	for i := range spec.Placements {
+		if spec.Placements[i].Controller == controller {
 			oldPlacementIdx = i
 			break
 		}
@@ -105,16 +128,24 @@ func (o *GenericFederatedObject) DeleteControllerPlacement(controller string) bo
 		return false
 	}
 
-	o.Spec.Placements = append(o.Spec.Placements[:oldPlacementIdx], o.Spec.Placements[(oldPlacementIdx+1):]...)
+	spec.Placements = append(spec.Placements[:oldPlacementIdx], spec.Placements[(oldPlacementIdx+1):]...)
 	return true
 }
 
-func (o *GenericFederatedObject) GetTemplateGVK() (schema.GroupVersionKind, error) {
+func (spec *GenericFederatedObjectSpec) GetTemplateAsUnstructured() (*unstructured.Unstructured, error) {
+	template := &unstructured.Unstructured{}
+	if err := template.UnmarshalJSON(spec.Template.Raw); err != nil {
+		return nil, err
+	}
+	return template, nil
+}
+
+func (spec *GenericFederatedObjectSpec) GetTemplateGVK() (schema.GroupVersionKind, error) {
 	type partialTypeMetadata struct {
 		metav1.TypeMeta `json:",inline"`
 	}
 	metadata := &partialTypeMetadata{}
-	if err := json.Unmarshal(o.Spec.Template.Raw, metadata); err != nil {
+	if err := json.Unmarshal(spec.Template.Raw, metadata); err != nil {
 		return schema.GroupVersionKind{}, err
 	}
 	return metadata.GroupVersionKind(), nil
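// A minimal usage sketch (illustrative only, not from the patch): given a
// *GenericFederatedObjectSpec named spec, the source object's GVK and the full
// template can be recovered from the opaque spec.Template.Raw bytes.
//
//	gvk, err := spec.GetTemplateGVK() // e.g. apps/v1, Kind=Deployment
//	if err != nil {
//		return err
//	}
//	template, err := spec.GetTemplateAsUnstructured() // decoded *unstructured.Unstructured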
diff --git a/pkg/apis/core/v1alpha1/interface.go b/pkg/apis/core/v1alpha1/interface.go
index 9c1b8c3a..9de16456 100644
--- a/pkg/apis/core/v1alpha1/interface.go
+++ b/pkg/apis/core/v1alpha1/interface.go
@@ -41,3 +41,10 @@ type GenericPropagationPolicy interface {
 	GetSpec() *PropagationPolicySpec
 	GenericRefCountedPolicy
 }
+
+type GenericFederatedObject interface {
+	metav1.Object
+	pkgruntime.Object
+	GetSpec() *GenericFederatedObjectSpec
+	GetStatus() *GenericFederatedObjectStatus
+}
diff --git a/pkg/apis/core/v1alpha1/types_federatedobject.go b/pkg/apis/core/v1alpha1/types_federatedobject.go
index 62fa2a58..d8d35c7f 100644
--- a/pkg/apis/core/v1alpha1/types_federatedobject.go
+++ b/pkg/apis/core/v1alpha1/types_federatedobject.go
@@ -22,9 +22,16 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+// +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Schema shared by both FederatedObject and ClusterFederatedObject.
-type GenericFederatedObject struct {
+// +kubebuilder:validation:Required
+// +kubebuilder:resource:path=federatedobjects,shortName=fo,singular=federatedobject
+// +kubebuilder:subresource:status
+// +kubebuilder:object:root=true
+
+// FederatedObject describes a namespace-scoped Kubernetes object and how it should be propagated to different member
+// clusters.
+type FederatedObject struct {
 	metav1.TypeMeta `json:",inline"`
 	// Standard object's metadata.
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
@@ -39,17 +46,6 @@ type GenericFederatedObject struct {
 	Status GenericFederatedObjectStatus `json:"status,omitempty"`
 }
 
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:validation:Required
-// +kubebuilder:resource:path=federatedobjects,shortName=fo,singular=federatedobject
-// +kubebuilder:subresource:status
-// +kubebuilder:object:root=true
-
-// FederatedObject describes a namespace-scoped Kubernetes object and how it should be propagated to different member
-// clusters.
-type FederatedObject GenericFederatedObject
-
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +kubebuilder:object:root=true
 
@@ -70,7 +66,20 @@ type FederatedObjectList struct {
 
 // ClusterFederatedObject describes a cluster-scoped Kubernetes object and how it should be propagated to different
 // member clusters.
-type ClusterFederatedObject GenericFederatedObject
+type ClusterFederatedObject struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired behavior of the FederatedObject. + Spec GenericFederatedObjectSpec `json:"spec"` + + // Status describes the most recently observed status of the FederatedObject. + // +optional + Status GenericFederatedObjectStatus `json:"status,omitempty"` +} // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go index 250629be..1226a6c0 100644 --- a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -915,34 +915,6 @@ func (in *GenericCollectedStatus) DeepCopy() *GenericCollectedStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GenericFederatedObject) DeepCopyInto(out *GenericFederatedObject) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFederatedObject. -func (in *GenericFederatedObject) DeepCopy() *GenericFederatedObject { - if in == nil { - return nil - } - out := new(GenericFederatedObject) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GenericFederatedObject) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericFederatedObjectCondition) DeepCopyInto(out *GenericFederatedObjectCondition) { *out = *in diff --git a/pkg/util/fedobjectadapters/adapters.go b/pkg/util/fedobjectadapters/adapters.go index 583dce3e..5691428e 100644 --- a/pkg/util/fedobjectadapters/adapters.go +++ b/pkg/util/fedobjectadapters/adapters.go @@ -2,6 +2,7 @@ package fedobjectadapters import ( "context" + "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -10,21 +11,26 @@ import ( fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" ) -func convertToGeneric[Type *fedcorev1a1.ClusterFederatedObject | *fedcorev1a1.FederatedObject]( - obj Type, err error, -) (*fedcorev1a1.GenericFederatedObject, error) { - return (*fedcorev1a1.GenericFederatedObject)(obj), err +func ensureNilInterface( + obj fedcorev1a1.GenericFederatedObject, err error, +) (fedcorev1a1.GenericFederatedObject, error) { + if err != nil { + // Returning a non-nil interface value with nil concrete type can be confusing. + // We make sure the returned interface value is nil if there's an error. 
+ return nil, err + } + return obj, nil } func GetFromLister( fedObjectLister fedcorev1a1listers.FederatedObjectLister, clusterFedObjectLister fedcorev1a1listers.ClusterFederatedObjectLister, namespace, name string, -) (*fedcorev1a1.GenericFederatedObject, error) { +) (fedcorev1a1.GenericFederatedObject, error) { if namespace == "" { - return convertToGeneric(clusterFedObjectLister.Get(name)) + return ensureNilInterface(clusterFedObjectLister.Get(name)) } else { - return convertToGeneric(fedObjectLister.FederatedObjects(namespace).Get(name)) + return ensureNilInterface(fedObjectLister.FederatedObjects(namespace).Get(name)) } } @@ -32,16 +38,24 @@ func Create( ctx context.Context, fedObjectClient fedcorev1a1client.FederatedObjectsGetter, clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter, - obj *fedcorev1a1.GenericFederatedObject, + obj fedcorev1a1.GenericFederatedObject, opts metav1.CreateOptions, -) (*fedcorev1a1.GenericFederatedObject, error) { +) (fedcorev1a1.GenericFederatedObject, error) { if obj.GetNamespace() == "" { - return convertToGeneric( - clusterFedObjectClient.ClusterFederatedObjects().Create(ctx, (*fedcorev1a1.ClusterFederatedObject)(obj), opts), + clusterFedObject, ok := obj.(*fedcorev1a1.ClusterFederatedObject) + if !ok { + return nil, fmt.Errorf("expected ClusterFederatedObject but got %T", obj) + } + return ensureNilInterface( + clusterFedObjectClient.ClusterFederatedObjects().Create(ctx, clusterFedObject, opts), ) } else { - return convertToGeneric( - fedObjectClient.FederatedObjects(obj.GetNamespace()).Create(ctx, (*fedcorev1a1.FederatedObject)(obj), opts), + fedObject, ok := obj.(*fedcorev1a1.FederatedObject) + if !ok { + return nil, fmt.Errorf("expected FederatedObject but got %T", obj) + } + return ensureNilInterface( + fedObjectClient.FederatedObjects(obj.GetNamespace()).Create(ctx, fedObject, opts), ) } } @@ -50,16 +64,24 @@ func Update( ctx context.Context, fedObjectClient fedcorev1a1client.FederatedObjectsGetter, clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter, - obj *fedcorev1a1.GenericFederatedObject, + obj fedcorev1a1.GenericFederatedObject, opts metav1.UpdateOptions, -) (*fedcorev1a1.GenericFederatedObject, error) { +) (fedcorev1a1.GenericFederatedObject, error) { if obj.GetNamespace() == "" { - return convertToGeneric( - clusterFedObjectClient.ClusterFederatedObjects().Update(ctx, (*fedcorev1a1.ClusterFederatedObject)(obj), opts), + clusterFedObject, ok := obj.(*fedcorev1a1.ClusterFederatedObject) + if !ok { + return nil, fmt.Errorf("expected ClusterFederatedObject but got %T", obj) + } + return ensureNilInterface( + clusterFedObjectClient.ClusterFederatedObjects().Update(ctx, clusterFedObject, opts), ) } else { - return convertToGeneric( - fedObjectClient.FederatedObjects(obj.GetNamespace()).Update(ctx, (*fedcorev1a1.FederatedObject)(obj), opts), + fedObject, ok := obj.(*fedcorev1a1.FederatedObject) + if !ok { + return nil, fmt.Errorf("expected FederatedObject but got %T", obj) + } + return ensureNilInterface( + fedObjectClient.FederatedObjects(obj.GetNamespace()).Update(ctx, fedObject, opts), ) } } @@ -68,16 +90,24 @@ func UpdateStatus( ctx context.Context, fedObjectClient fedcorev1a1client.FederatedObjectsGetter, clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter, - obj *fedcorev1a1.GenericFederatedObject, + obj fedcorev1a1.GenericFederatedObject, opts metav1.UpdateOptions, -) (*fedcorev1a1.GenericFederatedObject, error) { +) (fedcorev1a1.GenericFederatedObject, error) { if 
obj.GetNamespace() == "" { - return convertToGeneric( - clusterFedObjectClient.ClusterFederatedObjects().UpdateStatus(ctx, (*fedcorev1a1.ClusterFederatedObject)(obj), opts), + clusterFedObject, ok := obj.(*fedcorev1a1.ClusterFederatedObject) + if !ok { + return nil, fmt.Errorf("expected ClusterFederatedObject but got %T", obj) + } + return ensureNilInterface( + clusterFedObjectClient.ClusterFederatedObjects().UpdateStatus(ctx, clusterFedObject, opts), ) } else { - return convertToGeneric( - fedObjectClient.FederatedObjects(obj.GetNamespace()).UpdateStatus(ctx, (*fedcorev1a1.FederatedObject)(obj), opts), + fedObject, ok := obj.(*fedcorev1a1.FederatedObject) + if !ok { + return nil, fmt.Errorf("expected FederatedObject but got %T", obj) + } + return ensureNilInterface( + fedObjectClient.FederatedObjects(obj.GetNamespace()).UpdateStatus(ctx, fedObject, opts), ) } } diff --git a/pkg/util/pendingcontrollers/pendingcontrollers.go b/pkg/util/pendingcontrollers/pendingcontrollers.go index 98df5d84..def421b1 100644 --- a/pkg/util/pendingcontrollers/pendingcontrollers.go +++ b/pkg/util/pendingcontrollers/pendingcontrollers.go @@ -31,7 +31,7 @@ const ( PendingControllersAnnotation = common.DefaultPrefix + "pending-controllers" ) -func GetPendingControllers(fedObject *fedcorev1a1.GenericFederatedObject) (PendingControllers, error) { +func GetPendingControllers(fedObject fedcorev1a1.GenericFederatedObject) (PendingControllers, error) { value, exists := fedObject.GetAnnotations()[PendingControllersAnnotation] if !exists { return nil, fmt.Errorf("annotation %v does not exist", PendingControllersAnnotation) @@ -63,7 +63,7 @@ func NormalizeControllers(controllers PendingControllers) PendingControllers { } func SetPendingControllers( - fedObject *fedcorev1a1.GenericFederatedObject, + fedObject fedcorev1a1.GenericFederatedObject, controllers PendingControllers, ) (updated bool, err error) { controllers = NormalizeControllers(controllers) @@ -90,7 +90,7 @@ func getDownstreamControllers(allControllers PendingControllers, current string) } func UpdatePendingControllers( - fedObject *fedcorev1a1.GenericFederatedObject, + fedObject fedcorev1a1.GenericFederatedObject, toRemove string, shouldSetDownstream bool, allControllers PendingControllers, @@ -124,7 +124,7 @@ func UpdatePendingControllers( return SetPendingControllers(fedObject, newPendingControllers) } -func ControllerDependenciesFulfilled(fedObject *fedcorev1a1.GenericFederatedObject, controllerName string) (bool, error) { +func ControllerDependenciesFulfilled(fedObject fedcorev1a1.GenericFederatedObject, controllerName string) (bool, error) { pendingControllers, err := GetPendingControllers(fedObject) if err != nil { return false, err From 7e10425a75a258e214ea67a3dff7e029e7963b8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Tue, 18 Jul 2023 03:18:53 +0000 Subject: [PATCH 045/173] feat: deepcopy genericfederatedobject --- pkg/apis/core/v1alpha1/extensions_federatedobject.go | 8 ++++++++ pkg/apis/core/v1alpha1/interface.go | 1 + 2 files changed, 9 insertions(+) diff --git a/pkg/apis/core/v1alpha1/extensions_federatedobject.go b/pkg/apis/core/v1alpha1/extensions_federatedobject.go index a7c3646f..92d4df7a 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedobject.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedobject.go @@ -37,6 +37,10 @@ func (o *FederatedObject) GetStatus() *GenericFederatedObjectStatus { return &o.Status } +func (o *FederatedObject) DeepCopyGenericFederatedObject() 
GenericFederatedObject { + return o.DeepCopy() +} + var _ GenericFederatedObject = &FederatedObject{} func (o *ClusterFederatedObject) GetSpec() *GenericFederatedObjectSpec { @@ -47,6 +51,10 @@ func (o *ClusterFederatedObject) GetStatus() *GenericFederatedObjectStatus { return &o.Status } +func (o *ClusterFederatedObject) DeepCopyGenericFederatedObject() GenericFederatedObject { + return o.DeepCopy() +} + var _ GenericFederatedObject = &ClusterFederatedObject{} // Placement extensions diff --git a/pkg/apis/core/v1alpha1/interface.go b/pkg/apis/core/v1alpha1/interface.go index 9de16456..fdcef995 100644 --- a/pkg/apis/core/v1alpha1/interface.go +++ b/pkg/apis/core/v1alpha1/interface.go @@ -47,4 +47,5 @@ type GenericFederatedObject interface { pkgruntime.Object GetSpec() *GenericFederatedObjectSpec GetStatus() *GenericFederatedObjectStatus + DeepCopyGenericFederatedObject() GenericFederatedObject } From 18cb40b5155789e2b980b10953ac3715f22d0a9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Tue, 18 Jul 2023 03:48:05 +0000 Subject: [PATCH 046/173] fix: fedobjectadapters should accept single client --- pkg/util/fedobjectadapters/adapters.go | 28 +++++++++++--------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/pkg/util/fedobjectadapters/adapters.go b/pkg/util/fedobjectadapters/adapters.go index 5691428e..7aa1878e 100644 --- a/pkg/util/fedobjectadapters/adapters.go +++ b/pkg/util/fedobjectadapters/adapters.go @@ -36,8 +36,7 @@ func GetFromLister( func Create( ctx context.Context, - fedObjectClient fedcorev1a1client.FederatedObjectsGetter, - clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Client, obj fedcorev1a1.GenericFederatedObject, opts metav1.CreateOptions, ) (fedcorev1a1.GenericFederatedObject, error) { @@ -47,7 +46,7 @@ func Create( return nil, fmt.Errorf("expected ClusterFederatedObject but got %T", obj) } return ensureNilInterface( - clusterFedObjectClient.ClusterFederatedObjects().Create(ctx, clusterFedObject, opts), + fedv1a1Client.ClusterFederatedObjects().Create(ctx, clusterFedObject, opts), ) } else { fedObject, ok := obj.(*fedcorev1a1.FederatedObject) @@ -55,15 +54,14 @@ func Create( return nil, fmt.Errorf("expected FederatedObject but got %T", obj) } return ensureNilInterface( - fedObjectClient.FederatedObjects(obj.GetNamespace()).Create(ctx, fedObject, opts), + fedv1a1Client.FederatedObjects(obj.GetNamespace()).Create(ctx, fedObject, opts), ) } } func Update( ctx context.Context, - fedObjectClient fedcorev1a1client.FederatedObjectsGetter, - clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Client, obj fedcorev1a1.GenericFederatedObject, opts metav1.UpdateOptions, ) (fedcorev1a1.GenericFederatedObject, error) { @@ -73,7 +71,7 @@ func Update( return nil, fmt.Errorf("expected ClusterFederatedObject but got %T", obj) } return ensureNilInterface( - clusterFedObjectClient.ClusterFederatedObjects().Update(ctx, clusterFedObject, opts), + fedv1a1Client.ClusterFederatedObjects().Update(ctx, clusterFedObject, opts), ) } else { fedObject, ok := obj.(*fedcorev1a1.FederatedObject) @@ -81,15 +79,14 @@ func Update( return nil, fmt.Errorf("expected FederatedObject but got %T", obj) } return ensureNilInterface( - fedObjectClient.FederatedObjects(obj.GetNamespace()).Update(ctx, fedObject, opts), + fedv1a1Client.FederatedObjects(obj.GetNamespace()).Update(ctx, fedObject, opts), ) } } func 
UpdateStatus( ctx context.Context, - fedObjectClient fedcorev1a1client.FederatedObjectsGetter, - clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Client, obj fedcorev1a1.GenericFederatedObject, opts metav1.UpdateOptions, ) (fedcorev1a1.GenericFederatedObject, error) { @@ -99,7 +96,7 @@ func UpdateStatus( return nil, fmt.Errorf("expected ClusterFederatedObject but got %T", obj) } return ensureNilInterface( - clusterFedObjectClient.ClusterFederatedObjects().UpdateStatus(ctx, clusterFedObject, opts), + fedv1a1Client.ClusterFederatedObjects().UpdateStatus(ctx, clusterFedObject, opts), ) } else { fedObject, ok := obj.(*fedcorev1a1.FederatedObject) @@ -107,21 +104,20 @@ func UpdateStatus( return nil, fmt.Errorf("expected FederatedObject but got %T", obj) } return ensureNilInterface( - fedObjectClient.FederatedObjects(obj.GetNamespace()).UpdateStatus(ctx, fedObject, opts), + fedv1a1Client.FederatedObjects(obj.GetNamespace()).UpdateStatus(ctx, fedObject, opts), ) } } func Delete( ctx context.Context, - fedObjectClient fedcorev1a1client.FederatedObjectsGetter, - clusterFedObjectClient fedcorev1a1client.ClusterFederatedObjectsGetter, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Client, namespace, name string, opts metav1.DeleteOptions, ) error { if namespace == "" { - return clusterFedObjectClient.ClusterFederatedObjects().Delete(ctx, name, opts) + return fedv1a1Client.ClusterFederatedObjects().Delete(ctx, name, opts) } else { - return fedObjectClient.FederatedObjects(namespace).Delete(ctx, name, opts) + return fedv1a1Client.FederatedObjects(namespace).Delete(ctx, name, opts) } } From 44b32e061c07647e3cda1b95c0ea26129d77dddd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Tue, 18 Jul 2023 03:58:54 +0000 Subject: [PATCH 047/173] fix: fedobjectadapters should accept interfaces --- pkg/util/fedobjectadapters/adapters.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/util/fedobjectadapters/adapters.go b/pkg/util/fedobjectadapters/adapters.go index 7aa1878e..7b4b02ad 100644 --- a/pkg/util/fedobjectadapters/adapters.go +++ b/pkg/util/fedobjectadapters/adapters.go @@ -36,7 +36,7 @@ func GetFromLister( func Create( ctx context.Context, - fedv1a1Client fedcorev1a1client.CoreV1alpha1Client, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface, obj fedcorev1a1.GenericFederatedObject, opts metav1.CreateOptions, ) (fedcorev1a1.GenericFederatedObject, error) { @@ -61,7 +61,7 @@ func Create( func Update( ctx context.Context, - fedv1a1Client fedcorev1a1client.CoreV1alpha1Client, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface, obj fedcorev1a1.GenericFederatedObject, opts metav1.UpdateOptions, ) (fedcorev1a1.GenericFederatedObject, error) { @@ -86,7 +86,7 @@ func Update( func UpdateStatus( ctx context.Context, - fedv1a1Client fedcorev1a1client.CoreV1alpha1Client, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface, obj fedcorev1a1.GenericFederatedObject, opts metav1.UpdateOptions, ) (fedcorev1a1.GenericFederatedObject, error) { @@ -111,7 +111,7 @@ func UpdateStatus( func Delete( ctx context.Context, - fedv1a1Client fedcorev1a1client.CoreV1alpha1Client, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface, namespace, name string, opts metav1.DeleteOptions, ) error { From df5e18dd29bf3fc5b03ded2e97ecfabc2287f898 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 14:52:33 +0800 Subject: [PATCH 048/173] refactor(informer-manager): wait for 
cache sync before adding eventhandlers --- pkg/util/informermanager/informermanager.go | 23 ++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index b7ca7bb5..fab5e283 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "sync" + "time" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -59,7 +60,8 @@ type informerManager struct { eventHandlerRegistrations map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration lastAppliedFTCsCache map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig - queue workqueue.RateLimitingInterface + queue workqueue.RateLimitingInterface + workerCount int64 } func NewInformerManager( @@ -81,6 +83,7 @@ func NewInformerManager( eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, lastAppliedFTCsCache: map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{}, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), + workerCount: 3, } ftcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -157,7 +160,6 @@ func (m *informerManager) processFTC( ftc *fedcorev1a1.FederatedTypeConfig, ) (err error, needReenqueue bool) { m.lock.Lock() - defer m.lock.Unlock() ftc = ftc.DeepCopy() ftcName := ftc.Name @@ -206,6 +208,18 @@ func (m *informerManager) processFTC( m.lastAppliedFTCsCache[ftcName] = map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{} } + m.lock.Unlock() + + // Wait for cache sync before registering the event handlers + cacheSyncTimeout, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + if !cache.WaitForCacheSync(cacheSyncTimeout.Done(), informer.Informer().HasSynced) { + return fmt.Errorf("timed out waiting for informer to sync, event handlers not yet registered"), true + } + + m.lock.Lock() + defer m.lock.Unlock() + registrations := m.eventHandlerRegistrations[ftcName] lastAppliedFTCs := m.lastAppliedFTCsCache[ftcName] @@ -341,7 +355,10 @@ func (m *informerManager) Start(ctx context.Context) { return } - go wait.UntilWithContext(ctx, m.worker, 0) + for i := 0; i < int(m.workerCount); i++ { + go wait.UntilWithContext(ctx, m.worker, 0) + } + go func() { <-ctx.Done() From 57af0f9540894e33dd88a8bf07f0878beab83695 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 15:15:38 +0800 Subject: [PATCH 049/173] avoid busy waiting when processing ftc --- pkg/util/informermanager/informermanager.go | 48 ++++++++++----------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index fab5e283..a534d07e 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -60,8 +60,7 @@ type informerManager struct { eventHandlerRegistrations map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration lastAppliedFTCsCache map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig - queue workqueue.RateLimitingInterface - workerCount int64 + queue workqueue.RateLimitingInterface } func NewInformerManager( @@ -83,7 +82,6 @@ func NewInformerManager( eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, 
lastAppliedFTCsCache: map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{}, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), - workerCount: 3, } ftcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -137,11 +135,15 @@ func (m *informerManager) worker(ctx context.Context) { return } - err, needReenqueue := m.processFTC(ctx, ftc) + err, needReenqueue, delay := m.processFTC(ctx, ftc) if err != nil { if needReenqueue { logger.Error(err, "Failed to process FederatedTypeConfig, will retry") - m.queue.AddRateLimited(key) + if delay > 0 { + m.queue.AddAfter(key, delay) + } else { + m.queue.AddRateLimited(key) + } } else { logger.Error(err, "Failed to process FederatedTypeConfig") m.queue.Forget(key) @@ -151,15 +153,20 @@ func (m *informerManager) worker(ctx context.Context) { m.queue.Forget(key) if needReenqueue { - m.queue.Add(key) + if delay > 0 { + m.queue.AddAfter(key, delay) + } else { + m.queue.AddRateLimited(key) + } } } func (m *informerManager) processFTC( ctx context.Context, ftc *fedcorev1a1.FederatedTypeConfig, -) (err error, needReenqueue bool) { +) (err error, needReenqueue bool, delay time.Duration) { m.lock.Lock() + defer m.lock.Unlock() ftc = ftc.DeepCopy() ftcName := ftc.Name @@ -178,14 +185,14 @@ func (m *informerManager) processFTC( // time and we missed processing the deletion. We simply process the ftc deletion and reenqueue. Note: // updating of ftc source types, however, is still not a supported use case. err := m.processFTCDeletionUnlocked(ctx, ftcName) - return err, true + return err, true, 0 } informer = m.informers[ftcName] } else { if err := m.gvkMapping.Add(ftcName, gvk); err != nil { // There must be another ftc with the same source type GVK. - return fmt.Errorf("source type is already referenced by another FederatedTypeConfig: %w", err), false + return fmt.Errorf("source type is already referenced by another FederatedTypeConfig: %w", err), false, 0 } logger.V(2).Info("Starting new informer for FederatedTypeConfig") @@ -208,18 +215,11 @@ func (m *informerManager) processFTC( m.lastAppliedFTCsCache[ftcName] = map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{} } - m.lock.Unlock() - - // Wait for cache sync before registering the event handlers - cacheSyncTimeout, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - if !cache.WaitForCacheSync(cacheSyncTimeout.Done(), informer.Informer().HasSynced) { - return fmt.Errorf("timed out waiting for informer to sync, event handlers not yet registered"), true + if !informer.Informer().HasSynced() { + logger.V(3).Info("Informer for FederatedTypeConfig not synced, will not register event handlers yet") + return nil, true, 100 * time.Millisecond } - m.lock.Lock() - defer m.lock.Unlock() - registrations := m.eventHandlerRegistrations[ftcName] lastAppliedFTCs := m.lastAppliedFTCsCache[ftcName] @@ -232,7 +232,7 @@ func (m *informerManager) processFTC( if oldRegistration := registrations[generator]; oldRegistration != nil { if err := informer.Informer().RemoveEventHandler(oldRegistration); err != nil { - return fmt.Errorf("failed to unregister event handler: %w", err), true + return fmt.Errorf("failed to unregister event handler: %w", err), true, 0 } delete(registrations, generator) } @@ -241,7 +241,7 @@ func (m *informerManager) processFTC( if handler := generator.Generator(ftc); handler != nil { newRegistration, err := informer.Informer().AddEventHandler(handler) if err != nil { - return fmt.Errorf("failed to register event handler: %w", 
err), true + return fmt.Errorf("failed to register event handler: %w", err), true, 0 } registrations[generator] = newRegistration } @@ -249,7 +249,7 @@ func (m *informerManager) processFTC( lastAppliedFTCs[generator] = ftc } - return nil, false + return nil, false, 0 } func (m *informerManager) processFTCDeletion(ctx context.Context, ftcName string) error { @@ -355,9 +355,7 @@ func (m *informerManager) Start(ctx context.Context) { return } - for i := 0; i < int(m.workerCount); i++ { - go wait.UntilWithContext(ctx, m.worker, 0) - } + go wait.UntilWithContext(ctx, m.worker, 0) go func() { <-ctx.Done() From 14578c1f1e693fed74609a315774887f6cade64c Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 15:30:02 +0800 Subject: [PATCH 050/173] fix reenqueue logic --- pkg/util/informermanager/informermanager.go | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index a534d07e..de29fc7c 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -139,11 +139,7 @@ func (m *informerManager) worker(ctx context.Context) { if err != nil { if needReenqueue { logger.Error(err, "Failed to process FederatedTypeConfig, will retry") - if delay > 0 { - m.queue.AddAfter(key, delay) - } else { - m.queue.AddRateLimited(key) - } + m.queue.AddRateLimited(key) } else { logger.Error(err, "Failed to process FederatedTypeConfig") m.queue.Forget(key) @@ -153,18 +149,14 @@ func (m *informerManager) worker(ctx context.Context) { m.queue.Forget(key) if needReenqueue { - if delay > 0 { - m.queue.AddAfter(key, delay) - } else { - m.queue.AddRateLimited(key) - } + m.queue.AddAfter(key, delay) } } func (m *informerManager) processFTC( ctx context.Context, ftc *fedcorev1a1.FederatedTypeConfig, -) (err error, needReenqueue bool, delay time.Duration) { +) (err error, needReenqueue bool, reenqueueDelay time.Duration) { m.lock.Lock() defer m.lock.Unlock() From 69f76ab7d923cf924c562b2748fbcd56eadff7bc Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 15:43:43 +0800 Subject: [PATCH 051/173] add log for event handler registration --- pkg/util/informermanager/informermanager.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index de29fc7c..f5afccc5 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -215,6 +215,8 @@ func (m *informerManager) processFTC( registrations := m.eventHandlerRegistrations[ftcName] lastAppliedFTCs := m.lastAppliedFTCsCache[ftcName] + logger.V(2).Info("Registering event handlers for FederatedTypeConfig") + for _, generator := range m.eventHandlerGenerators { lastApplied := lastAppliedFTCs[generator] if !generator.Predicate(lastApplied, ftc) { From 4f0558107b5d3390adcbcaa473debe6277f7978a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Wed, 19 Jul 2023 10:21:13 +0000 Subject: [PATCH 052/173] feat(informermanager): allow adding multiple cluster event handlers --- pkg/util/informermanager/federatedinformermanager.go | 4 ++-- pkg/util/informermanager/federatedinformermanager_test.go | 2 +- pkg/util/informermanager/interface.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 
1fc7e442..f5100add 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -242,7 +242,7 @@ func (m *federatedInformerManager) processClusterDeletionUnlocked(ctx context.Co return nil } -func (m *federatedInformerManager) AddClusterEventHandler(handler *ClusterEventHandler) error { +func (m *federatedInformerManager) AddClusterEventHandlers(handlers ...*ClusterEventHandler) error { m.lock.Lock() defer m.lock.Unlock() @@ -250,7 +250,7 @@ func (m *federatedInformerManager) AddClusterEventHandler(handler *ClusterEventH return fmt.Errorf("failed to add ClusterEventHandler: FederatedInformerManager is already started") } - m.clusterEventHandlers = append(m.clusterEventHandlers, handler) + m.clusterEventHandlers = append(m.clusterEventHandlers, handlers...) return nil } diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index 0d651a62..dc13a1a5 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -1465,7 +1465,7 @@ func bootstrapFederatedInformerManagerWithFakeClients( g.Expect(err).ToNot(gomega.HaveOccurred()) } for _, handler := range clusterEventHandlers { - err := informerManager.AddClusterEventHandler(handler) + err := informerManager.AddClusterEventHandlers(handler) g.Expect(err).ToNot(gomega.HaveOccurred()) } diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index 88bc1c15..42ef46f6 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -108,9 +108,9 @@ type FederatedInformerManager interface { // Returns true if the FederatedInformerManager's view of FederatedTypeConfigs and FederatedClusters is synced. HasSynced() bool - // Adds a ClusterEventHandler that can be used by controllers to hook into the cluster events received by the + // Adds ClusterEventHandlers that can be used by controllers to hook into the cluster events received by the // FederatedInformerManager. - AddClusterEventHandler(handler *ClusterEventHandler) error + AddClusterEventHandlers(handlers ...*ClusterEventHandler) error // Starts processing FederatedTypeConfig and FederatedCluster events. 
Start(ctx context.Context) From b415e9a04f090edc72ab6454352608e97cd321a1 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Mon, 17 Jul 2023 15:43:04 +0800 Subject: [PATCH 053/173] progress --- pkg/controllers/federate/controller.go | 379 +++++++++--------- pkg/controllers/federate/util.go | 79 ++-- pkg/util/eventhandlers/eventhandler.go | 35 ++ pkg/util/logging/logging.go | 5 + pkg/util/meta/federatedobject.go | 38 ++ .../util => util/naming}/naming.go | 2 +- .../util => util/naming}/naming_test.go | 18 +- pkg/{controllers => }/util/worker/result.go | 0 pkg/{controllers => }/util/worker/worker.go | 0 9 files changed, 336 insertions(+), 220 deletions(-) create mode 100644 pkg/util/eventhandlers/eventhandler.go create mode 100644 pkg/util/meta/federatedobject.go rename pkg/{controllers/util => util/naming}/naming.go (99%) rename pkg/{controllers/util => util/naming}/naming_test.go (81%) rename pkg/{controllers => }/util/worker/result.go (100%) rename pkg/{controllers => }/util/worker/worker.go (100%) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index f8aa2c09..854dfdec 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -19,36 +19,40 @@ package federate import ( "context" "fmt" - "strconv" "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/dynamic" dynamicclient "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" kubeclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" finalizersutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/sourcefeedback" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/meta" + "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( @@ -69,19 +73,14 @@ const ( // FederateController federates objects of source type to objects of federated type type FederateController struct { - typeConfig *fedcorev1a1.FederatedTypeConfig - name string - 
fedSystemNamespace string
+	informerManager          informermanager.InformerManager
+	fedObjectInformer        fedcorev1a1informers.FederatedObjectInformer
+	clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer
 
-	federatedObjectClient dynamicclient.NamespaceableResourceInterface
-	federatedObjectLister cache.GenericLister
-	federatedObjectSynced cache.InformerSynced
+	fedClient     fedclient.Interface
+	dynamicClient dynamic.Interface
 
-	sourceObjectClient dynamicclient.NamespaceableResourceInterface
-	sourceObjectLister cache.GenericLister
-	sourceObjectSynced cache.InformerSynced
-
-	worker worker.ReconcileWorker
+	worker worker.ReconcileWorker[workerKey]
 	eventRecorder record.EventRecorder
 
 	metrics stats.Metrics
@@ -93,117 +92,148 @@ func (c *FederateController) IsControllerReady() bool {
 }
 
 func NewFederateController(
-	typeConfig *fedcorev1a1.FederatedTypeConfig,
 	kubeClient kubeclient.Interface,
 	dynamicClient dynamicclient.Interface,
-	federatedObjectInformer informers.GenericInformer,
-	sourceObjectInformer informers.GenericInformer,
+	fedClient fedclient.Interface,
+	fedObjectInformer fedcorev1a1informers.FederatedObjectInformer,
+	clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer,
+	informerManager informermanager.InformerManager,
 	metrics stats.Metrics,
 	workerCount int,
 	fedSystemNamespace string,
 ) (*FederateController, error) {
-	controllerName := fmt.Sprintf("%s-federate-controller", typeConfig.GetFederatedType().Name)
-	logger := klog.LoggerWithValues(klog.Background(), "controller", FederateControllerName, "ftc", typeConfig.Name)
-
 	c := &FederateController{
-		typeConfig:         typeConfig,
-		name:               controllerName,
-		fedSystemNamespace: fedSystemNamespace,
-		metrics:            metrics,
-		logger:             logger,
-	}
-
-	c.worker = worker.NewReconcileWorker(
+		informerManager:          informerManager,
+		fedObjectInformer:        fedObjectInformer,
+		clusterFedObjectInformer: clusterFedObjectInformer,
+		fedClient:                fedClient,
+		dynamicClient:            dynamicClient,
+		metrics:                  metrics,
+		logger:                   klog.Background().WithValues("controller", FederateControllerName),
+	}
+
+	c.eventRecorder = eventsink.NewDefederatingRecorderMux(kubeClient, FederateControllerName, 6)
+	c.worker = worker.NewReconcileWorker[workerKey](
+		FederateControllerName,
+		nil,
 		c.reconcile,
 		worker.RateLimiterOptions{},
 		workerCount,
 		metrics,
-		delayingdeliver.NewMetricTags("federate-controller-worker", c.typeConfig.GetFederatedType().Kind),
 	)
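// Illustrative sketch, inferred from the composite literals and the key.String()
// call used elsewhere in this patch; the actual workerKey definition lives in a
// portion of the patch not shown here, and the String method body below is an
// assumption:
//
//	type workerKey struct {
//		name      string
//		namespace string
//		ftc       *fedcorev1a1.FederatedTypeConfig
//	}
//
//	func (k workerKey) String() string {
//		if k.namespace == "" {
//			return k.name
//		}
//		return k.namespace + "/" + k.name
//	}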
+ return eventhandlers.NewTriggerOnAllChanges(func(obj runtime.Object) { + uns := obj.(*unstructured.Unstructured) + c.worker.Enqueue(workerKey{ + name: uns.GetName(), + namespace: uns.GetNamespace(), + ftc: ftc, + }) + }) + }, + }); err != nil { + return nil, err + } + + if _, err := fedObjectInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { + fedObj := o.(*fedcorev1a1.FederatedObject) + logger := c.logger.WithValues("federated-object", common.NewQualifiedName(fedObj)) + + srcMeta, err := meta.GetSourceObjectMeta(&fedObj.Spec) if err != nil { - c.logger.Error(err, fmt.Sprintf("Received source object with invalid type %T", obj)) - return false + logger.Error(err, "Failed to get source object's metadata from FederatedObject") + return } - return metaObj.GetNamespace() != c.fedSystemNamespace - }, - Handler: util.NewTriggerOnAllChanges(c.worker.EnqueueObject), - }) - - c.federatedObjectLister = federatedObjectInformer.Lister() - c.federatedObjectSynced = federatedObjectInformer.Informer().HasSynced - federatedObjectInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { - obj = tombstone.Obj - if obj == nil { - return false - } + + gvk := srcMeta.GroupVersionKind() + logger = logger.WithValues("gvk", gvk) + + ftc, exists := c.informerManager.GetResourceFTC(gvk) + if !exists { + logger.Error(nil, "Received event for FederatedObject without FederatedTypeConfig") + return } - metaObj, err := meta.Accessor(obj) + c.worker.Enqueue(workerKey{ + name: srcMeta.GetName(), + namespace: srcMeta.GetNamespace(), + ftc: ftc, + }) + }), + ); err != nil { + return nil, err + } + + if _, err := clusterFedObjecInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { + fedObj := o.(*fedcorev1a1.ClusterFederatedObject) + logger := c.logger.WithValues("cluster-federated-object", common.NewQualifiedName(fedObj)) + + srcMeta, err := meta.GetSourceObjectMeta(&fedObj.Spec) + if err != nil { - c.logger.Error(err, fmt.Sprintf("Received federated object with invalid type %T", obj)) - return false + logger.Error(err, "Failed to get source object's metadata from ClusterFederatedObject") + return + } + + gvk := srcMeta.GroupVersionKind() + logger = logger.WithValues("gvk", gvk) + + ftc, exists := c.informerManager.GetResourceFTC(gvk) + if !exists { + logger.Error(nil, "Received event for ClusterFederatedObject without FederatedTypeConfig") + return } - return metaObj.GetNamespace() != c.fedSystemNamespace - }, - Handler: util.NewTriggerOnAllChanges(c.worker.EnqueueObject), - }) + + c.worker.Enqueue(workerKey{ + name: srcMeta.GetName(), + namespace: srcMeta.GetNamespace(), + ftc: ftc, + }) + }), + ); err != nil { + return nil, err + } return c, nil } func (c *FederateController) Run(ctx context.Context) { - c.logger.Info("Starting controller") - defer c.logger.Info("Stopping controller") + ctx, logger := logging.InjectLoggerValues(ctx, "controller", FederateControllerName) + + logger.Info("Starting controller") + defer logger.Info("Stopping controller") - if !cache.WaitForNamedCacheSync(c.name, ctx.Done(), c.HasSynced) { + if !cache.WaitForNamedCacheSync(FederateControllerName, ctx.Done(), c.HasSynced) { return } - c.worker.Run(ctx.Done()) + c.worker.Run(ctx) <-ctx.Done() } func (c *FederateController) HasSynced() bool { - return c.sourceObjectSynced() && c.federatedObjectSynced() +
return c.informerManager.HasSynced() && c.fedObjectInformer.Informer().HasSynced() } -func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (status worker.Result) { +func (c *FederateController) reconcile(ctx context.Context, key workerKey) (status worker.Result) { _ = c.metrics.Rate("federate.throughput", 1) - logger := c.logger.WithValues("object", qualifiedName.String()) - ctx := klog.NewContext(context.TODO(), logger) + ctx, logger := logging.InjectLogger(ctx, c.logger) + ctx, logger = logging.InjectLoggerValues(ctx, "source-object", key.String()) startTime := time.Now() logger.V(3).Info("Start reconcile") defer func() { - c.metrics.Duration(fmt.Sprintf("%s.latency", c.name), startTime) + c.metrics.Duration(fmt.Sprintf("%s.latency", FederateControllerName), startTime) logger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcile") }() - sourceObject, err := c.sourceObjectFromStore(qualifiedName) + sourceGVR := key.ftc.GetSourceTypeGVR() + sourceObject, err := c.sourceObjectFromStore(key) if err != nil && apierrors.IsNotFound(err) { - logger.V(3).Info(fmt.Sprintf("No source object for %s found, skip federating", qualifiedName.String())) + logger.V(3).Info("No source object found, skipping federation") return worker.StatusAllOK } if err != nil { @@ -212,7 +242,7 @@ func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (stat } sourceObject = sourceObject.DeepCopy() - fedObject, err := c.federatedObjectFromStore(qualifiedName) + fedObject, err := c.federatedObjectFromStore(key) if err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to get federated object from store") return worker.StatusError @@ -223,12 +253,9 @@ func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (stat fedObject = fedObject.DeepCopy() } - federatedAPIResource := c.typeConfig.GetFederatedType() - federatedGVK := schemautil.APIResourceToGVK(&federatedAPIResource) - if sourceObject.GetDeletionTimestamp() != nil { logger.V(3).Info("Source object terminating") - if err := c.handleTerminatingSourceObject(ctx, sourceObject, fedObject); err != nil { + if err := c.handleTerminatingSourceObject(ctx, sourceGVR, sourceObject, fedObject); err != nil { logger.Error(err, "Failed to handle source object deletion") if apierrors.IsConflict(err) { return worker.StatusConflict @@ -245,7 +272,7 @@ func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (stat } } - if sourceObject, err = c.ensureFinalizer(ctx, sourceObject); err != nil { + if sourceObject, err = c.ensureFinalizer(ctx, sourceGVR, sourceObject); err != nil { logger.Error(err, "Failed to ensure finalizer on source object") if apierrors.IsConflict(err) { return worker.StatusConflict @@ -261,9 +288,7 @@ func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (stat sourceObject, corev1.EventTypeWarning, EventReasonCreateFederatedObject, - "Failed to create federated object: %s %s: %v", - federatedGVK.String(), - qualifiedName.String(), + "Failed to create federated object: %v", err, ) @@ -278,9 +303,7 @@ func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (stat sourceObject, corev1.EventTypeNormal, EventReasonCreateFederatedObject, - "Federated object created: %s %s", - federatedGVK.String(), - qualifiedName.String(), + "Federated object created", ) return worker.StatusAllOK } @@ -296,9 +319,8 @@ func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (stat
sourceObject, corev1.EventTypeWarning, EventReasonUpdateFederatedObject, - "Failed to reconcile existing federated object: %s %s: %v", - federatedGVK, - qualifiedName.String(), + "Failed to reconcile existing federated object %s: %v", + fedObject.Name, err, ) return worker.StatusError @@ -308,9 +330,8 @@ func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (stat sourceObject, corev1.EventTypeNormal, EventReasonUpdateFederatedObject, - "Federated object updated: %s %s", - federatedGVK, - qualifiedName.String(), + "Federated object updated: %s", + fedObject.Name, ) } else { logger.V(3).Info("No updates required to the federated object") @@ -327,34 +348,43 @@ func (c *FederateController) reconcile(qualifiedName common.QualifiedName) (stat return worker.StatusAllOK } -func (c *FederateController) federatedObjectFromStore(qualifiedName common.QualifiedName) (*unstructured.Unstructured, error) { - var obj pkgruntime.Object - var err error +func (c *FederateController) sourceObjectFromStore(key workerKey) (*unstructured.Unstructured, error) { + gvk := key.ftc.GetSourceTypeGVK() - if c.typeConfig.GetNamespaced() { - obj, err = c.federatedObjectLister.ByNamespace(qualifiedName.Namespace).Get(qualifiedName.Name) - } else { - obj, err = c.federatedObjectLister.Get(qualifiedName.Name) + lister, hasSynced, exists := c.informerManager.GetResourceLister(gvk) + if !exists { + return nil, fmt.Errorf("lister for %s does not exist", gvk) + } + if !hasSynced() { + return nil, fmt.Errorf("lister for %s not synced", gvk) } - return obj.(*unstructured.Unstructured), err -} - -func (c *FederateController) sourceObjectFromStore(qualifiedName common.QualifiedName) (*unstructured.Unstructured, error) { - var obj pkgruntime.Object + var obj runtime.Object var err error - if c.typeConfig.GetNamespaced() { - obj, err = c.sourceObjectLister.ByNamespace(qualifiedName.Namespace).Get(qualifiedName.Name) + if key.ftc.GetNamespaced() { + obj, err = lister.ByNamespace(key.namespace).Get(key.name) } else { - obj, err = c.sourceObjectLister.Get(qualifiedName.Name) + obj, err = lister.Get(key.name) } return obj.(*unstructured.Unstructured), err } +func (c *FederateController) federatedObjectFromStore(key workerKey) (*fedcorev1a1.GenericFederatedObject, error) { + fedName := naming.GenerateFederatedObjectName(key.name, key.ftc.Name) + + return fedobjectadapters.GetFromLister( + c.fedObjectInformer.Lister(), + c.clusterFedObjectInformer.Lister(), + key.namespace, + fedName, + ) +} + func (c *FederateController) ensureFinalizer( ctx context.Context, + sourceGVR schema.GroupVersionResource, sourceObj *unstructured.Unstructured, ) (*unstructured.Unstructured, error) { logger := klog.FromContext(ctx).WithValues("finalizer", FinalizerFederateController) @@ -368,7 +398,12 @@ func (c *FederateController) ensureFinalizer( } logger.V(1).Info("Adding finalizer to source object") - sourceObj, err = c.sourceObjectClient.Namespace(sourceObj.GetNamespace()).Update(ctx, sourceObj, metav1.UpdateOptions{}) + + sourceObj, err = c.dynamicClient.Resource(sourceGVR).Namespace(sourceObj.GetNamespace()).Update( + ctx, + sourceObj, + metav1.UpdateOptions{}, + ) if err != nil { return nil, fmt.Errorf("failed to update source object with finalizer: %w", err) } @@ -378,6 +413,7 @@ func (c *FederateController) ensureFinalizer( func (c *FederateController) removeFinalizer( ctx context.Context, + sourceGVR schema.GroupVersionResource, sourceObj *unstructured.Unstructured, ) (*unstructured.Unstructured, error) { logger := 
klog.FromContext(ctx).WithValues("finalizer", FinalizerFederateController) @@ -391,7 +427,12 @@ func (c *FederateController) removeFinalizer( } logger.V(1).Info("Removing finalizer from source object") - sourceObj, err = c.sourceObjectClient.Namespace(sourceObj.GetNamespace()).Update(ctx, sourceObj, metav1.UpdateOptions{}) + + sourceObj, err = c.dynamicClient.Resource(sourceGVR).Namespace(sourceObj.GetNamespace()).Update( + ctx, + sourceObj, + metav1.UpdateOptions{}, + ) if err != nil { return nil, fmt.Errorf("failed to update source object without finalizer: %w", err) } @@ -399,13 +440,19 @@ func (c *FederateController) removeFinalizer( return sourceObj, nil } -func (c *FederateController) handleTerminatingSourceObject(ctx context.Context, sourceObject, fedObject *unstructured.Unstructured) error { +func (c *FederateController) handleTerminatingSourceObject( + ctx context.Context, + sourceObject *unstructured.Unstructured, + fedObject *fedcorev1a1.GenericFederatedObject, + sourceGVR schema.GroupVersionResource, + isNamespaced bool, +) error { logger := klog.FromContext(ctx) if fedObject == nil { logger.V(3).Info("Federated object deleted") var err error - if _, err = c.removeFinalizer(ctx, sourceObject); err != nil { + if _, err = c.removeFinalizer(ctx, sourceGVR, sourceObject); err != nil { return fmt.Errorf("failed to remove finalizer from source object: %w", err) } return nil @@ -413,9 +460,12 @@ func (c *FederateController) handleTerminatingSourceObject(ctx context.Context, if fedObject.GetDeletionTimestamp() == nil { logger.V(1).Info("Deleting federated object") - if err := c.federatedObjectClient.Namespace(fedObject.GetNamespace()).Delete( + if err := fedobjectadapters.Delete( ctx, - fedObject.GetName(), + c.fedClient.CoreV1alpha1(), + c.fedClient.CoreV1alpha1(), + fedObject.Namespace, + fedObject.Name, metav1.DeleteOptions{}, ); err != nil { return fmt.Errorf("failed to delete federated object: %w", err) @@ -426,22 +476,28 @@ func (c *FederateController) handleTerminatingSourceObject(ctx context.Context, return nil } -func (c *FederateController) handleCreateFederatedObject(ctx context.Context, sourceObject *unstructured.Unstructured) error { +func (c *FederateController) handleCreateFederatedObject( + ctx context.Context, + ftc *fedcorev1a1.FederatedTypeConfig, + sourceObject *unstructured.Unstructured, +) error { logger := klog.FromContext(ctx) logger.V(2).Info("Generating federated object from source object") - fedObject, err := newFederatedObjectForSourceObject(c.typeConfig, sourceObject) + fedObject, err := newFederatedObjectForSourceObject(ftc, sourceObject) if err != nil { return fmt.Errorf("failed to generate federated object from source object: %w", err) } - if _, err = pendingcontrollers.SetPendingControllers(fedObject, c.typeConfig.GetControllers()); err != nil { + if _, err = pendingcontrollers.SetPendingControllers(fedObject, ftc.GetControllers()); err != nil { return fmt.Errorf("failed to set pending controllers on federated object: %w", err) } logger.V(1).Info("Creating federated object") - if _, err = c.federatedObjectClient.Namespace(fedObject.GetNamespace()).Create( + if _, err := fedobjectadapters.Create( ctx, + c.fedClient.CoreV1alpha1(), + c.fedClient.CoreV1alpha1(), fedObject, metav1.CreateOptions{}, ); err != nil { @@ -475,7 +531,10 @@ func (c *FederateController) handleExistingFederatedObject( return true, nil } -func (c *FederateController) updateFeedbackAnnotations(ctx context.Context, sourceObject, fedObject *unstructured.Unstructured) error { +func (c 
*FederateController) updateFeedbackAnnotations( + ctx context.Context, + sourceObject, fedObject *unstructured.Unstructured, +) error { // because this is not an officially supported feature (and this function is called quite often), we intentionally // inflate the log level to prevent it from obstructing other logs. logger := klog.FromContext(ctx).V(4) @@ -499,9 +558,11 @@ func (c *FederateController) updateFeedbackAnnotations(ctx context.Context, sour var err error logger.V(1).Info("Updating source object with feedback annotations") - if c.typeConfig.GetSourceType().Group == appsv1.GroupName && c.typeConfig.GetSourceType().Name == "deployments" { + if c.typeConfig.GetSourceType().Group == appsv1.GroupName && + c.typeConfig.GetSourceType().Name == "deployments" { // deployment bumps generation if annotations are updated - _, err = c.sourceObjectClient.Namespace(sourceObject.GetNamespace()).UpdateStatus(ctx, sourceObject, metav1.UpdateOptions{}) + _, err = c.sourceObjectClient.Namespace(sourceObject.GetNamespace()). + UpdateStatus(ctx, sourceObject, metav1.UpdateOptions{}) } else { _, err = c.sourceObjectClient.Namespace(sourceObject.GetNamespace()).Update(ctx, sourceObject, metav1.UpdateOptions{}) } @@ -513,55 +574,3 @@ func (c *FederateController) updateFeedbackAnnotations(ctx context.Context, sour return nil } - -func ensureDeploymentFields(sourceObj, fedObj *unstructured.Unstructured) (bool, error) { - isUpdate := false - anno := sourceObj.GetAnnotations() - - // for retainReplicas - retainReplicasString := anno[RetainReplicasAnnotation] - retainReplicas := false - if retainReplicasString == "true" { - retainReplicas = true - } - actualRetainReplicas, ok, err := unstructured.NestedBool( - fedObj.Object, - common.SpecField, - common.RetainReplicasField, - ) - if err != nil { - return isUpdate, err - } - if !ok || (retainReplicas != actualRetainReplicas) { - isUpdate = true - if err = unstructured.SetNestedField(fedObj.Object, retainReplicas, common.SpecField, common.RetainReplicasField); err != nil { - return isUpdate, err - } - } - - // for revisionHistoryLimit - revisionHistoryLimitString := anno[common.RevisionHistoryLimit] - revisionHistoryLimit := int64(1) - if revisionHistoryLimitString != "" { - revisionHistoryLimit, err = strconv.ParseInt(revisionHistoryLimitString, 10, 64) - if err != nil { - return isUpdate, err - } - } - actualRevisionHistoryLimit, ok, err := unstructured.NestedInt64( - fedObj.Object, - common.SpecField, - common.RevisionHistoryLimit, - ) - if err != nil { - return isUpdate, err - } - if !ok || (revisionHistoryLimit != actualRevisionHistoryLimit) { - isUpdate = true - if err = unstructured.SetNestedField(fedObj.Object, revisionHistoryLimit, common.SpecField, common.RevisionHistoryLimit); err != nil { - return isUpdate, err - } - } - - return isUpdate, nil -} diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index e0ba9b6d..19462161 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" @@ -40,9 +39,23 @@ import ( annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" 
"github.com/kubewharf/kubeadmiral/pkg/controllers/util/sourcefeedback" + "github.com/kubewharf/kubeadmiral/pkg/util/naming" ) -func templateForSourceObject(sourceObj *unstructured.Unstructured, annotations, labels map[string]string) *unstructured.Unstructured { +type workerKey struct { + name string + namespace string + ftc *fedcorev1a1.FederatedTypeConfig +} + +func (k workerKey) String() string { + return fmt.Sprintf("%s/%s", k.namespace, k.name) +} + +func templateForSourceObject( + sourceObj *unstructured.Unstructured, + annotations, labels map[string]string, +) *unstructured.Unstructured { template := sourceObj.DeepCopy() template.SetSelfLink("") template.SetUID("") @@ -59,62 +72,59 @@ func templateForSourceObject(sourceObj *unstructured.Unstructured, annotations, return template } -func newFederatedObjectForSourceObject( - typeConfig *fedcorev1a1.FederatedTypeConfig, - sourceObj *unstructured.Unstructured, -) (*unstructured.Unstructured, error) { - fedType := typeConfig.GetFederatedType() - fedObj := &unstructured.Unstructured{ - Object: make(map[string]interface{}), - } - fedObj.SetAPIVersion(schema.GroupVersion{Group: fedType.Group, Version: fedType.Version}.String()) - fedObj.SetKind(fedType.Kind) - fedObj.SetName(sourceObj.GetName()) +func newFederatedObjectForSourceObject(ftc *fedcorev1a1.FederatedTypeConfig, sourceObj *unstructured.Unstructured) (*fedcorev1a1.GenericFederatedObject, error) { + fedObj := &fedcorev1a1.GenericFederatedObject{} + fedName := naming.GenerateFederatedObjectName(sourceObj.GetName(), ftc.Name) + + fedObj.SetName(fedName) fedObj.SetNamespace(sourceObj.GetNamespace()) fedObj.SetOwnerReferences( []metav1.OwnerReference{*metav1.NewControllerRef(sourceObj, sourceObj.GroupVersionKind())}, ) + // Classify labels into labels that should be copied onto the FederatedObject and labels that should be copied onto + // the FederatedObject's template. + federatedLabels, templateLabels := classifyLabels(sourceObj.GetLabels()) - fedObj.SetLabels(federatedLabels) - observedLabelKeys := generateObservedKeys(sourceObj.GetLabels(), federatedLabels) + // Classify annotations into annotations that should be copied onto the FederatedObject and labels that should be + // copied onto the FederatedObject's template. federatedAnnotations, templateAnnotations := classifyAnnotations(sourceObj.GetAnnotations()) if federatedAnnotations == nil { federatedAnnotations = make(map[string]string) } + // Record the observed label and annotation keys in an annotation on the FederatedObject. + + observedLabelKeys := generateObservedKeys(sourceObj.GetLabels(), federatedLabels) observedAnnotationKeys := generateObservedKeys(sourceObj.GetAnnotations(), federatedAnnotations) federatedAnnotations[common.ObservedAnnotationKeysAnnotation] = observedAnnotationKeys federatedAnnotations[common.ObservedLabelKeysAnnotation] = observedLabelKeys - templateObject := templateForSourceObject(sourceObj, templateAnnotations, templateLabels).Object + // Generate the FederatedObject's template and update the FederatedObject. 
- if err := unstructured.SetNestedMap( - fedObj.Object, - templateObject, - common.SpecField, - common.TemplateField, - ); err != nil { - return nil, err + templateObject := templateForSourceObject(sourceObj, templateAnnotations, templateLabels).Object + rawTemplate, err := json.Marshal(templateObject) + if err != nil { + return nil, fmt.Errorf("failed to marshal template: %w", err) } + fedObj.Spec.Template.Raw = rawTemplate + + // Generate the JSON patch required to convert the source object to the FederatedObject's template and store it as + // an annotation in the FederatedObject. templateGeneratorMergePatch, err := CreateMergePatch(sourceObj, &unstructured.Unstructured{Object: templateObject}) if err != nil { return nil, fmt.Errorf("failed to create merge patch for source object: %w", err) } - federatedAnnotations[common.TemplateGeneratorMergePatchAnnotation] = string(templateGeneratorMergePatch) + + // Update the FederatedObject with the final annotation and label sets. + + fedObj.SetLabels(federatedLabels) fedObj.SetAnnotations(federatedAnnotations) - // For deployment fields - if sourceObj.GroupVersionKind() == appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) { - _, err := ensureDeploymentFields(sourceObj, fedObj) - if err != nil { - return nil, err - } - } return fedObj, nil } @@ -230,12 +240,12 @@ var ( common.NoSchedulingAnnotation, scheduler.FollowsObjectAnnotation, common.FollowersAnnotation, + RetainReplicasAnnotation, ) // TODO: Do we need to specify the internal annotations here? // List of annotations that should be ignored on the source object ignoredAnnotationSet = sets.New( - RetainReplicasAnnotation, util.LatestReplicasetDigestsAnnotation, sourcefeedback.SchedulingAnnotation, sourcefeedback.SyncingAnnotation, @@ -326,7 +336,10 @@ func generateObservedKeys(sourceMap map[string]string, federatedMap map[string]s sort.Strings(observedFederatedKeys) sort.Strings(observedNonFederatedKeys) - return strings.Join([]string{strings.Join(observedFederatedKeys, ","), strings.Join(observedNonFederatedKeys, ",")}, "|") + return strings.Join( + []string{strings.Join(observedFederatedKeys, ","), strings.Join(observedNonFederatedKeys, ",")}, + "|", + ) } // CreateMergePatch will return a merge patch document capable of converting diff --git a/pkg/util/eventhandlers/eventhandler.go b/pkg/util/eventhandlers/eventhandler.go new file mode 100644 index 00000000..2aed6004 --- /dev/null +++ b/pkg/util/eventhandlers/eventhandler.go @@ -0,0 +1,35 @@ +package eventhandlers + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +// NewTriggerOnAllChanges returns a cache.ResourceEventHandlerFuncs that will call the given function on all object +// changes. The given function will also be called on receiving cache.DeletedFinalStateUnknown deletion events. 
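+//
+// A typical registration looks like this (a sketch; `informer` and `enqueue`
+// are assumed to exist in the caller):
+//
+//	informer.Informer().AddEventHandler(NewTriggerOnAllChanges(func(o runtime.Object) {
+//		enqueue(o)
+//	}))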
+func NewTriggerOnAllChanges(triggerFunc func(runtime.Object)) *cache.ResourceEventHandlerFuncs { + return &cache.ResourceEventHandlerFuncs{ + DeleteFunc: func(old interface{}) { + if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok { + old = deleted.Obj + if old == nil { + return + } + } + oldObj := old.(runtime.Object) + triggerFunc(oldObj) + }, + AddFunc: func(cur interface{}) { + curObj := cur.(runtime.Object) + triggerFunc(curObj) + }, + UpdateFunc: func(old, cur interface{}) { + if !reflect.DeepEqual(old, cur) { + curObj := cur.(runtime.Object) + triggerFunc(curObj) + } + }, + } +} diff --git a/pkg/util/logging/logging.go b/pkg/util/logging/logging.go index cf2d33d4..0c9e5090 100644 --- a/pkg/util/logging/logging.go +++ b/pkg/util/logging/logging.go @@ -23,6 +23,11 @@ import ( "k8s.io/klog/v2" ) +func InjectLogger(ctx context.Context, logger klog.Logger) (context.Context, logr.Logger) { + ctx = klog.NewContext(ctx, logger) + return ctx, logger +} + func InjectLoggerValues(ctx context.Context, values ...interface{}) (context.Context, logr.Logger) { logger := klog.FromContext(ctx).WithValues(values...) ctx = klog.NewContext(ctx, logger) diff --git a/pkg/util/meta/federatedobject.go b/pkg/util/meta/federatedobject.go new file mode 100644 index 00000000..36e2aaf2 --- /dev/null +++ b/pkg/util/meta/federatedobject.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This file may have been modified by The KubeAdmiral Authors +("KubeAdmiral Modifications"). All KubeAdmiral Modifications +are Copyright 2023 The KubeAdmiral Authors. +*/ + +package meta + +import ( + "encoding/json" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" +) + +func GetSourceObjectMeta(spec *fedcorev1a1.GenericFederatedObjectSpec) (*metav1.PartialObjectMetadata, error) { + partialObjectMeta := &metav1.PartialObjectMetadata{} + if err := json.Unmarshal(spec.Template.Raw, partialObjectMeta); err != nil { + return nil, fmt.Errorf("failed to unmarshal FederatedObject's template: %w", err) + } + return partialObjectMeta, nil +} diff --git a/pkg/controllers/util/naming.go b/pkg/util/naming/naming.go similarity index 99% rename from pkg/controllers/util/naming.go rename to pkg/util/naming/naming.go index 809ea9f1..88667a09 100644 --- a/pkg/controllers/util/naming.go +++ b/pkg/util/naming/naming.go @@ -18,7 +18,7 @@ This file may have been modified by The KubeAdmiral Authors are Copyright 2023 The KubeAdmiral Authors. */ -package util +package naming import ( "fmt" diff --git a/pkg/controllers/util/naming_test.go b/pkg/util/naming/naming_test.go similarity index 81% rename from pkg/controllers/util/naming_test.go rename to pkg/util/naming/naming_test.go index abb8f4f8..ff6cd244 100644 --- a/pkg/controllers/util/naming_test.go +++ b/pkg/util/naming/naming_test.go @@ -1,4 +1,20 @@ -package util +/* +Copyright 2023 The KubeAdmiral Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package naming import ( "strings" diff --git a/pkg/controllers/util/worker/result.go b/pkg/util/worker/result.go similarity index 100% rename from pkg/controllers/util/worker/result.go rename to pkg/util/worker/result.go diff --git a/pkg/controllers/util/worker/worker.go b/pkg/util/worker/worker.go similarity index 100% rename from pkg/controllers/util/worker/worker.go rename to pkg/util/worker/worker.go From d163295f412aff260957310539b0cb5a9b742ef9 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Mon, 17 Jul 2023 17:04:24 +0800 Subject: [PATCH 054/173] refactor: federate informer --- pkg/controllers/federate/controller.go | 86 ++++--------------- pkg/controllers/federate/util.go | 75 ++++++++-------- .../util/sourcefeedback/scheduling.go | 26 ++---- pkg/util/meta/federatedobject.go | 4 +- 4 files changed, 67 insertions(+), 124 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 854dfdec..0ef8b421 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -21,7 +21,6 @@ import ( "fmt" "time" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,11 +39,8 @@ import ( fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" finalizersutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/sourcefeedback" "github.com/kubewharf/kubeadmiral/pkg/stats" "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" @@ -52,6 +48,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/util/logging" "github.com/kubewharf/kubeadmiral/pkg/util/meta" "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) @@ -141,10 +138,10 @@ func NewFederateController( if _, err := fedObjectInformer.Informer().AddEventHandler( eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { - fedObj := o.(*fedcorev1a1.FederatedObject) + fedObj := o.(*fedcorev1a1.GenericFederatedObject) logger := c.logger.WithValues("federated-object", common.NewQualifiedName(fedObj)) - srcMeta, err := meta.GetSourceObjectMeta(&fedObj.Spec) + srcMeta, err := meta.GetSourceObjectMeta(fedObj) if err != nil { logger.Error(err, "Failed to get source object's metadata from FederatedObject") return @@ -170,10 +167,10 @@ func NewFederateController( if _, err := 
clusterFedObjecInformer.Informer().AddEventHandler( eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { - fedObj := o.(*fedcorev1a1.ClusterFederatedObject) + fedObj := o.(*fedcorev1a1.GenericFederatedObject) logger := c.logger.WithValues("cluster-federated-object", common.NewQualifiedName(fedObj)) - srcMeta, err := meta.GetSourceObjectMeta(&fedObj.Spec) + srcMeta, err := meta.GetSourceObjectMeta(fedObj) if err != nil { logger.Error(err, "Failed to get source object's metadata from ClusterFederatedObject") return @@ -282,7 +279,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat if fedObject == nil { logger.V(3).Info("No federated object found") - if err := c.handleCreateFederatedObject(ctx, sourceObject); err != nil { + if err := c.handleCreateFederatedObject(ctx, key.ftc, sourceObject); err != nil { logger.Error(err, "Failed to create federated object") c.eventRecorder.Eventf( sourceObject, @@ -309,7 +306,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat } logger.V(3).Info("Federated object already exists") - updated, err := c.handleExistingFederatedObject(ctx, sourceObject, fedObject) + updated, err := c.handleExistingFederatedObject(ctx, key.ftc, sourceObject, fedObject) if err != nil { logger.Error(err, "Failed to reconcile existing federated object") if apierrors.IsConflict(err) { @@ -337,14 +334,6 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat logger.V(3).Info("No updates required to the federated object") } - if err := c.updateFeedbackAnnotations(ctx, sourceObject, fedObject); err != nil { - logger.Error(err, "Failed to sync feedback annotations to source object") - if apierrors.IsConflict(err) { - return worker.StatusConflict - } - return worker.StatusError - } - return worker.StatusAllOK } @@ -442,10 +431,9 @@ func (c *FederateController) removeFinalizer( func (c *FederateController) handleTerminatingSourceObject( ctx context.Context, + sourceGVR schema.GroupVersionResource, sourceObject *unstructured.Unstructured, fedObject *fedcorev1a1.GenericFederatedObject, - sourceGVR schema.GroupVersionResource, - isNamespaced bool, ) error { logger := klog.FromContext(ctx) @@ -509,12 +497,14 @@ func (c *FederateController) handleCreateFederatedObject( func (c *FederateController) handleExistingFederatedObject( ctx context.Context, - sourceObject, fedObject *unstructured.Unstructured, + ftc *fedcorev1a1.FederatedTypeConfig, + sourceObject *unstructured.Unstructured, + fedObject *fedcorev1a1.GenericFederatedObject, ) (bool, error) { logger := klog.FromContext(ctx) logger.V(3).Info("Checking if federated object needs update") - needsUpdate, err := updateFederatedObjectForSourceObject(fedObject, c.typeConfig, sourceObject) + needsUpdate, err := updateFederatedObjectForSourceObject(ftc, sourceObject, fedObject) if err != nil { return false, fmt.Errorf("failed to check if federated object needs update: %w", err) } @@ -524,53 +514,15 @@ func (c *FederateController) handleExistingFederatedObject( } logger.V(1).Info("Updating federated object") - if _, err = c.federatedObjectClient.Namespace(fedObject.GetNamespace()).Update(ctx, fedObject, metav1.UpdateOptions{}); err != nil { + if _, err = fedobjectadapters.Update( + ctx, + c.fedClient.CoreV1alpha1(), + c.fedClient.CoreV1alpha1(), + fedObject, + metav1.UpdateOptions{}, + ); err != nil { return false, fmt.Errorf("failed to update federated object: %w", err) } return true, nil } -func (c *FederateController) updateFeedbackAnnotations(
- ctx context.Context, - sourceObject, fedObject *unstructured.Unstructured, -) error { - // because this is not an officially supported feature (and this function is called quite often), we intentionally - // inflate the log level to prevent it from obstructing other logs. - logger := klog.FromContext(ctx).V(4) - hasChanged := false - - logger.V(2).Info("Sync scheduling annotation to source object") - if err := sourcefeedback.PopulateSchedulingAnnotation(sourceObject, fedObject, &hasChanged); err != nil { - return fmt.Errorf("failed to sync scheduling annotation to source object: %w", err) - } - - logger.V(2).Info("Sync syncing annotation to source object") - if value, exists := fedObject.GetAnnotations()[sourcefeedback.SyncingAnnotation]; exists { - hasAnnotationChanged, err := annotation.AddAnnotation(sourceObject, sourcefeedback.SyncingAnnotation, value) - if err != nil { - return fmt.Errorf("failed to sync syncing annotation to source object: %w", err) - } - hasChanged = hasChanged || hasAnnotationChanged - } - - if hasChanged { - var err error - - logger.V(1).Info("Updating source object with feedback annotations") - if c.typeConfig.GetSourceType().Group == appsv1.GroupName && - c.typeConfig.GetSourceType().Name == "deployments" { - // deployment bumps generation if annotations are updated - _, err = c.sourceObjectClient.Namespace(sourceObject.GetNamespace()). - UpdateStatus(ctx, sourceObject, metav1.UpdateOptions{}) - } else { - _, err = c.sourceObjectClient.Namespace(sourceObject.GetNamespace()).Update(ctx, sourceObject, metav1.UpdateOptions{}) - } - - if err != nil { - return fmt.Errorf("failed to update source object with feedback annotations: %w", err) - } - } - - return nil -} diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 19462161..b7436f7b 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -24,7 +24,6 @@ import ( "strings" jsonpatch "github.com/evanphx/json-patch/v5" - appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -36,8 +35,8 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" + annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" + "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/sourcefeedback" "github.com/kubewharf/kubeadmiral/pkg/util/naming" ) @@ -129,13 +128,14 @@ func newFederatedObjectForSourceObject(ftc *fedcorev1a1.FederatedTypeConfig, sou } func updateFederatedObjectForSourceObject( - fedObject *unstructured.Unstructured, typeConfig *fedcorev1a1.FederatedTypeConfig, sourceObject *unstructured.Unstructured, + fedObject *fedcorev1a1.GenericFederatedObject, ) (bool, error) { isUpdated := false - // set federated object's owner references to source object + // Set federated object's owner references to source object + currentOwner := fedObject.GetOwnerReferences() desiredOwner := []metav1.OwnerReference{*metav1.NewControllerRef(sourceObject, sourceObject.GroupVersionKind())} if !reflect.DeepEqual(currentOwner, desiredOwner) { @@ -143,9 +143,8 @@ func updateFederatedObjectForSourceObject( isUpdated 
= true } - federatedAnnotations, templateAnnotations := classifyAnnotations(sourceObject.GetAnnotations()) - - observedAnnotationKeys := generateObservedKeys(sourceObject.GetAnnotations(), federatedAnnotations) + // Classify labels into labels that should be copied onto the FederatedObject and labels that should be copied onto + // the FederatedObject's template, and update the FederatedObject's labels if needed. federatedLabels, templateLabels := classifyLabels(sourceObject.GetLabels()) if !equality.Semantic.DeepEqual(federatedLabels, fedObject.GetLabels()) { @@ -153,32 +152,31 @@ func updateFederatedObjectForSourceObject( isUpdated = true } - observedLabelKeys := generateObservedKeys(sourceObject.GetLabels(), federatedLabels) + // Classify annotations into annotations that should be copied onto the FederatedObject and annotations that should + // be copied onto the FederatedObject's template. - // sync template - fedObjectTemplate, foundTemplate, err := unstructured.NestedMap( - fedObject.Object, - common.SpecField, - common.TemplateField, - ) - if err != nil { - return false, fmt.Errorf("failed to parse template from federated object: %w", err) - } + federatedAnnotations, templateAnnotations := classifyAnnotations(sourceObject.GetAnnotations()) - targetTemplate := templateForSourceObject(sourceObject, templateAnnotations, templateLabels).Object - if !foundTemplate || !reflect.DeepEqual(fedObjectTemplate, targetTemplate) { - if err := unstructured.SetNestedMap(fedObject.Object, targetTemplate, common.SpecField, common.TemplateField); err != nil { - return false, fmt.Errorf("failed to set federated object template: %w", err) - } - isUpdated = true + // Generate the FederatedObject's template and compare it to the template in the FederatedObject, updating the + // FederatedObject if necessary. + + targetTemplate := templateForSourceObject(sourceObject, templateAnnotations, templateLabels) + foundTemplate := &unstructured.Unstructured{} + if err := json.Unmarshal(fedObject.Spec.Template.Raw, foundTemplate); err != nil { + return false, fmt.Errorf("failed to unmarshal template from federated object: %w", err) } + if !reflect.DeepEqual(foundTemplate.Object, targetTemplate.Object) { + rawTargetTemplate, err := json.Marshal(targetTemplate) + if err != nil { + return false, fmt.Errorf("failed to marshal template: %w", err) + } - templateGeneratorMergePatch, err := CreateMergePatch(sourceObject, targetTemplate) - if err != nil { - return false, fmt.Errorf("failed to create merge patch for source object: %w", err) + fedObject.Spec.Template.Raw = rawTargetTemplate + isUpdated = true } // Merge annotations because other controllers may have added annotations to the federated object. + newAnnotations, annotationChanges := annotationutil.CopySubmap( federatedAnnotations, fedObject.GetAnnotations(), @@ -188,6 +186,20 @@ func updateFederatedObjectForSourceObject( }, ) + // Record the observed label and annotation keys in an annotation on the FederatedObject. + + observedAnnotationKeys := generateObservedKeys(sourceObject.GetAnnotations(), federatedAnnotations) + observedLabelKeys := generateObservedKeys(sourceObject.GetLabels(), federatedLabels) + + // Generate the JSON patch required to convert the source object to the FederatedObject's template and store it as + // an annotation in the FederatedObject.
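+ // Applying this RFC 7386 merge patch to the source object reproduces the
+ // template exactly; in practice it mostly nulls out source-only fields,
+ // for example (illustrative only):
+ //
+ //	{"metadata":{"resourceVersion":null,"uid":null},"status":null}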
+ + templateGeneratorMergePatch, err := CreateMergePatch(sourceObject, targetTemplate) + if err != nil { + return false, fmt.Errorf("failed to create merge patch for source object: %w", err) + } + for key, desiredValue := range map[string]string{ common.ObservedAnnotationKeysAnnotation: observedAnnotationKeys, common.ObservedLabelKeysAnnotation: observedLabelKeys, @@ -205,15 +217,6 @@ func updateFederatedObjectForSourceObject( isUpdated = true } - // handle special deployment fields - if sourceObject.GroupVersionKind() == appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) { - deploymentFieldsUpdated, err := ensureDeploymentFields(sourceObject, fedObject) - if err != nil { - return false, fmt.Errorf("failed to ensure deployment fields: %w", err) - } - isUpdated = isUpdated || deploymentFieldsUpdated - } - if isUpdated { _, err = pendingcontrollers.SetPendingControllers(fedObject, typeConfig.GetControllers()) if err != nil { diff --git a/pkg/controllers/util/sourcefeedback/scheduling.go b/pkg/controllers/util/sourcefeedback/scheduling.go index d8507418..85e6f49a 100644 --- a/pkg/controllers/util/sourcefeedback/scheduling.go +++ b/pkg/controllers/util/sourcefeedback/scheduling.go @@ -20,9 +20,12 @@ import ( "sort" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/pointer" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + "github.com/kubewharf/kubeadmiral/pkg/util/meta" ) var SchedulingAnnotation = common.DefaultPrefix + "scheduling" @@ -42,37 +45,22 @@ type Scheduling struct { Placement []string `json:"placement,omitempty"` } -func PopulateSchedulingAnnotation(sourceObject, fedObject *unstructured.Unstructured, hasChanged *bool) (err error) { +func PopulateSchedulingAnnotation(sourceObject *unstructured.Unstructured, fedObject *fedcorev1a1.GenericFederatedObject, hasChanged *bool) (err error) { scheduling := Scheduling{} - generation, exists, err := unstructured.NestedInt64( - fedObject.Object, - common.SpecField, - common.TemplateField, - common.MetadataField, - common.GenerationField, - ) + srcMeta, err := meta.GetSourceObjectMeta(fedObject) if err != nil { return err } - if exists { - generation := generation - scheduling.Generation = &generation - } + scheduling.Generation = pointer.Int64(srcMeta.GetGeneration()) scheduling.FederatedGeneration = fedObject.GetGeneration() - placement, err := util.UnmarshalGenericPlacements(fedObject) - if err != nil { - return err - } - - clusterNames := placement.ClusterNameUnion() + clusterNames := fedObject.GetPlacementUnion() if len(clusterNames) > 0 { for clusterName := range clusterNames { scheduling.Placement = append(scheduling.Placement, clusterName) } - sort.Strings(scheduling.Placement) } diff --git a/pkg/util/meta/federatedobject.go b/pkg/util/meta/federatedobject.go index 36e2aaf2..3d31fffd 100644 --- a/pkg/util/meta/federatedobject.go +++ b/pkg/util/meta/federatedobject.go @@ -29,9 +29,9 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) -func GetSourceObjectMeta(spec *fedcorev1a1.GenericFederatedObjectSpec) (*metav1.PartialObjectMetadata, error) { +func GetSourceObjectMeta(fedObject *fedcorev1a1.GenericFederatedObject) (*metav1.PartialObjectMetadata, error) { partialObjectMeta := &metav1.PartialObjectMetadata{} - if err := json.Unmarshal(spec.Template.Raw, partialObjectMeta); err != nil { + if err := json.Unmarshal(fedObject.Spec.Template.Raw, 
partialObjectMeta); err != nil { return nil, fmt.Errorf("failed to unmarshal FederatedObject's template: %w", err) } return partialObjectMeta, nil From 9a1cbbf981352ffa56186376d89a6fe5ca530d68 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Mon, 17 Jul 2023 17:37:00 +0800 Subject: [PATCH 055/173] refactor(core): add informermanager to controllercontext --- .../app/controllermanager.go | 8 +- cmd/controller-manager/app/core.go | 237 +++++++++--------- cmd/controller-manager/app/ftcmanager.go | 42 ---- cmd/controller-manager/app/util.go | 36 ++- pkg/controllers/context/context.go | 12 +- pkg/controllers/federate/controller.go | 3 +- pkg/util/cluster/client.go | 164 ++++++++++++ pkg/util/cluster/util.go | 1 - 8 files changed, 317 insertions(+), 186 deletions(-) delete mode 100644 cmd/controller-manager/app/ftcmanager.go create mode 100644 pkg/util/cluster/client.go diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index 5b4c5cb1..9ebec699 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -36,14 +36,16 @@ import ( const ( FederatedClusterControllerName = "cluster" + FederateControllerName = "federate" MonitorControllerName = "monitor" FollowerControllerName = "follower" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ - FederatedClusterControllerName: startFederatedClusterController, - MonitorControllerName: startMonitorController, - FollowerControllerName: startFollowerController, + // FederatedClusterControllerName: startFederatedClusterController, + FederateControllerName: startFederateController, + // MonitorControllerName: startMonitorController, + // FollowerControllerName: startFollowerController, } var controllersDisabledByDefault = sets.New(MonitorControllerName) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index 6157efbd..d3630832 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -20,58 +20,79 @@ import ( "context" "fmt" - "k8s.io/klog/v2" + // "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" + // "github.com/kubewharf/kubeadmiral/pkg/client/generic" "github.com/kubewharf/kubeadmiral/pkg/controllermanager" - "github.com/kubewharf/kubeadmiral/pkg/controllers/automigration" + // "github.com/kubewharf/kubeadmiral/pkg/controllers/automigration" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" - "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" - "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedtypeconfig" + // "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" + // "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedtypeconfig" "github.com/kubewharf/kubeadmiral/pkg/controllers/follower" - "github.com/kubewharf/kubeadmiral/pkg/controllers/monitor" + // "github.com/kubewharf/kubeadmiral/pkg/controllers/monitor" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" + // schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) -func startFederatedClusterController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { - 
clusterController, err := federatedcluster.NewFederatedClusterController( - controllerCtx.FedClientset, +// func startFederatedClusterController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { +// clusterController, err := federatedcluster.NewFederatedClusterController( +// controllerCtx.FedClientset, +// controllerCtx.KubeClientset, +// controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), +// controllerCtx.FederatedClientFactory, +// controllerCtx.Metrics, +// controllerCtx.FedSystemNamespace, +// controllerCtx.RestConfig, +// controllerCtx.WorkerCount, +// controllerCtx.ComponentConfig.ClusterJoinTimeout, +// ) +// if err != nil { +// return nil, fmt.Errorf("error creating federated cluster controller: %w", err) +// } + +// go clusterController.Run(ctx) + +// return clusterController, nil +// } + +func startFederateController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { + federateController, err := federate.NewFederateController( controllerCtx.KubeClientset, - controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), - controllerCtx.FederatedClientFactory, + controllerCtx.DynamicClientset, + controllerCtx.FedClientset, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedObjects(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + controllerCtx.InformerManager, controllerCtx.Metrics, - controllerCtx.FedSystemNamespace, - controllerCtx.RestConfig, controllerCtx.WorkerCount, - controllerCtx.ComponentConfig.ClusterJoinTimeout, + controllerCtx.FedSystemNamespace, ) if err != nil { - return nil, fmt.Errorf("error creating federated cluster controller: %w", err) + return nil, fmt.Errorf("error creating federate controller: %w", err) } - go clusterController.Run(ctx) + go federateController.Run(ctx) - return clusterController, nil + return federateController, nil } -func startMonitorController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { - controllerConfig := controllerConfigFromControllerContext(controllerCtx) - //nolint:contextcheck - monitorController, err := monitor.NewMonitorController(controllerConfig) - if err != nil { - return nil, fmt.Errorf("error creating monitor controller: %w", err) - } +//func startMonitorController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { +// controllerConfig := controllerConfigFromControllerContext(controllerCtx) +// //nolint:contextcheck +// monitorController, err := monitor.NewMonitorController(controllerConfig) +// if err != nil { +// return nil, fmt.Errorf("error creating monitor controller: %w", err) +// } - if err = monitorController.Run(ctx.Done()); err != nil { - return nil, err - } +// if err = monitorController.Run(ctx.Done()); err != nil { +// return nil, err +// } - return monitorController, nil -} +// return monitorController, nil +//} //nolint:contextcheck func startFollowerController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { @@ -110,37 +131,37 @@ func controllerConfigFromControllerContext(controllerCtx *controllercontext.Cont } } -func startGlobalScheduler( - ctx context.Context, - controllerCtx *controllercontext.Context, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) (controllermanager.Controller, error) { - federatedAPIResource := typeConfig.GetFederatedType() - federatedGVR := 
schemautil.APIResourceToGVR(&federatedAPIResource) - - scheduler, err := scheduler.NewScheduler( - klog.FromContext(ctx), - typeConfig, - controllerCtx.KubeClientset, - controllerCtx.FedClientset, - controllerCtx.DynamicClientset, - controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), - controllerCtx.FedInformerFactory.Core().V1alpha1().PropagationPolicies(), - controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterPropagationPolicies(), - controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), - controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulingProfiles(), - controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulerPluginWebhookConfigurations(), - controllerCtx.Metrics, - controllerCtx.WorkerCount, - ) - if err != nil { - return nil, fmt.Errorf("error creating global scheduler: %w", err) - } - - go scheduler.Run(ctx) - - return scheduler, nil -} +// func startGlobalScheduler( +// ctx context.Context, +// controllerCtx *controllercontext.Context, +// typeConfig *fedcorev1a1.FederatedTypeConfig, +// ) (controllermanager.Controller, error) { +// federatedAPIResource := typeConfig.GetFederatedType() +// federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) + +// scheduler, err := scheduler.NewScheduler( +// klog.FromContext(ctx), +// typeConfig, +// controllerCtx.KubeClientset, +// controllerCtx.FedClientset, +// controllerCtx.DynamicClientset, +// controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), +// controllerCtx.FedInformerFactory.Core().V1alpha1().PropagationPolicies(), +// controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterPropagationPolicies(), +// controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), +// controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulingProfiles(), +// controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulerPluginWebhookConfigurations(), +// controllerCtx.Metrics, +// controllerCtx.WorkerCount, +// ) +// if err != nil { +// return nil, fmt.Errorf("error creating global scheduler: %w", err) +// } + +// go scheduler.Run(ctx) + +// return scheduler, nil +// } func isGlobalSchedulerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool { for _, controllerGroup := range typeConfig.GetControllers() { @@ -153,70 +174,36 @@ func isGlobalSchedulerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool return false } -func startFederateController( - ctx context.Context, - controllerCtx *controllercontext.Context, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) (controllermanager.Controller, error) { - federatedAPIResource := typeConfig.GetFederatedType() - federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) - - sourceAPIResource := typeConfig.GetSourceType() - sourceGVR := schemautil.APIResourceToGVR(sourceAPIResource) - - federateController, err := federate.NewFederateController( - typeConfig, - controllerCtx.KubeClientset, - controllerCtx.DynamicClientset, - controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), - controllerCtx.DynamicInformerFactory.ForResource(sourceGVR), - controllerCtx.Metrics, - controllerCtx.WorkerCount, - controllerCtx.FedSystemNamespace, - ) - if err != nil { - return nil, fmt.Errorf("error creating federate controller: %w", err) - } - - go federateController.Run(ctx) - - return federateController, nil -} - -func isFederateControllerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool { - return typeConfig.GetSourceType() != nil -} - -func startAutoMigrationController( - ctx context.Context, - controllerCtx 
*controllercontext.Context, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) (controllermanager.Controller, error) { - genericClient, err := generic.New(controllerCtx.RestConfig) - if err != nil { - return nil, fmt.Errorf("error creating generic client: %w", err) - } - - federatedAPIResource := typeConfig.GetFederatedType() - federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) - - //nolint:contextcheck - controller, err := automigration.NewAutoMigrationController( - controllerConfigFromControllerContext(controllerCtx), - typeConfig, - genericClient, - controllerCtx.KubeClientset, - controllerCtx.DynamicClientset.Resource(federatedGVR), - controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), - ) - if err != nil { - return nil, fmt.Errorf("error creating auto-migration controller: %w", err) - } - - go controller.Run(ctx) - - return controller, nil -} +//func startAutoMigrationController( +// ctx context.Context, +// controllerCtx *controllercontext.Context, +// typeConfig *fedcorev1a1.FederatedTypeConfig, +//) (controllermanager.Controller, error) { +// genericClient, err := generic.New(controllerCtx.RestConfig) +// if err != nil { +// return nil, fmt.Errorf("error creating generic client: %w", err) +// } + +// federatedAPIResource := typeConfig.GetFederatedType() +// federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) + +// //nolint:contextcheck +// controller, err := automigration.NewAutoMigrationController( +// controllerConfigFromControllerContext(controllerCtx), +// typeConfig, +// genericClient, +// controllerCtx.KubeClientset, +// controllerCtx.DynamicClientset.Resource(federatedGVR), +// controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), +// ) +// if err != nil { +// return nil, fmt.Errorf("error creating auto-migration controller: %w", err) +// } + +// go controller.Run(ctx) + +// return controller, nil +//} func isAutoMigrationControllerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool { return typeConfig.Spec.AutoMigration != nil && typeConfig.Spec.AutoMigration.Enabled diff --git a/cmd/controller-manager/app/ftcmanager.go b/cmd/controller-manager/app/ftcmanager.go deleted file mode 100644 index 95773149..00000000 --- a/cmd/controller-manager/app/ftcmanager.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package app - -import ( - "github.com/kubewharf/kubeadmiral/pkg/controllermanager" -) - -const ( - FederateControllerName = "federate" - GlobalSchedulerName = "scheduler" - AutoMigrationControllerName = "automigration" -) - -var knownFTCSubControllers = map[string]controllermanager.FTCSubControllerInitFuncs{ - GlobalSchedulerName: { - StartFunc: startGlobalScheduler, - IsEnabledFunc: isGlobalSchedulerEnabled, - }, - FederateControllerName: { - StartFunc: startFederateController, - IsEnabledFunc: isFederateControllerEnabled, - }, - AutoMigrationControllerName: { - StartFunc: startAutoMigrationController, - IsEnabledFunc: isAutoMigrationControllerEnabled, - }, -} diff --git a/cmd/controller-manager/app/util.go b/cmd/controller-manager/app/util.go index 6e05948c..3ea35de3 100644 --- a/cmd/controller-manager/app/util.go +++ b/cmd/controller-manager/app/util.go @@ -29,14 +29,16 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/cmd/controller-manager/app/options" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/federatedclient" + clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" ) // KnownControllers returns all well known controller names @@ -110,14 +112,29 @@ func createControllerContext(opts *options.Options) (*controllercontext.Context, dynamicInformerFactory := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClientset, informerResyncPeriod) fedInformerFactory := fedinformers.NewSharedInformerFactory(fedClientset, informerResyncPeriod) - federatedClientFactory := federatedclient.NewFederatedClientsetFactory( - fedClientset, - kubeClientset, + informerManager := informermanager.NewInformerManager( + dynamicClientset, + fedInformerFactory.Core().V1alpha1().FederatedTypeConfigs(), + nil, + ) + federatedInformerManager := informermanager.NewFederatedInformerManager( + informermanager.ClusterClientGetter{ + ConnectionHash: informermanager.DefaultClusterConnectionHash, + ClientGetter: func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) { + restConfig, err := clusterutil.BuildClusterConfig( + cluster, + kubeClientset, + restConfig, + common.DefaultFedSystemNamespace, + ) + if err != nil { + return nil, err + } + return dynamic.NewForConfig(restConfig) + }, + }, + fedInformerFactory.Core().V1alpha1().FederatedTypeConfigs(), fedInformerFactory.Core().V1alpha1().FederatedClusters(), - common.DefaultFedSystemNamespace, - restConfig, - opts.MaxPodListers, - opts.EnablePodPruning, ) return &controllercontext.Context{ @@ -140,7 +157,8 @@ func createControllerContext(opts *options.Options) (*controllercontext.Context, DynamicInformerFactory: dynamicInformerFactory, FedInformerFactory: fedInformerFactory, - FederatedClientFactory: federatedClientFactory, + InformerManager: informerManager, + FederatedInformerManager: federatedInformerManager, }, nil } diff --git a/pkg/controllers/context/context.go b/pkg/controllers/context/context.go index 3755abec..e38f088d 100644 --- 
a/pkg/controllers/context/context.go +++ b/pkg/controllers/context/context.go @@ -29,8 +29,8 @@ import ( fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/federatedclient" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" ) type Context struct { @@ -53,7 +53,8 @@ type Context struct { DynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory FedInformerFactory fedinformers.SharedInformerFactory - FederatedClientFactory federatedclient.FederatedClientFactory + InformerManager informermanager.InformerManager + FederatedInformerManager informermanager.FederatedInformerManager } func (c *Context) StartFactories(ctx context.Context) { @@ -67,8 +68,11 @@ func (c *Context) StartFactories(ctx context.Context) { c.FedInformerFactory.Start(ctx.Done()) } - if c.FederatedClientFactory != nil { - c.FederatedClientFactory.Start(ctx) + if c.InformerManager != nil { + c.InformerManager.Start(ctx) + } + if c.FederatedInformerManager != nil { + c.FederatedInformerManager.Start(ctx) } } diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 0ef8b421..7aceefa0 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -93,7 +93,6 @@ func NewFederateController( dynamicClient dynamicclient.Interface, fedClient fedclient.Interface, fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, - clusterFedObjecInformer fedcorev1a1informers.ClusterFederatedObjectInformer, clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, informerManager informermanager.InformerManager, metrics stats.Metrics, @@ -165,7 +164,7 @@ func NewFederateController( return nil, err } - if _, err := clusterFedObjecInformer.Informer().AddEventHandler( + if _, err := clusterFedObjectInformer.Informer().AddEventHandler( eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { fedObj := o.(*fedcorev1a1.GenericFederatedObject) logger := c.logger.WithValues("cluster-federated-object", common.NewQualifiedName(fedObj)) diff --git a/pkg/util/cluster/client.go b/pkg/util/cluster/client.go new file mode 100644 index 00000000..404e2baa --- /dev/null +++ b/pkg/util/cluster/client.go @@ -0,0 +1,164 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" +) + +// User account keys +const ( + ClientCertificateKey = "client-certificate-data" + ClientKeyKey = "client-key-data" + CertificateAuthorityKey = "certificate-authority-data" +) + +// Service account keys +const ( + ServiceAccountTokenKey = "service-account-token-data" + ServiceAccountCAKey = "service-account-ca-data" +) + +func BuildClusterConfig( + cluster *fedcorev1a1.FederatedCluster, + fedClient kubernetes.Interface, + restConfig *rest.Config, + fedSystemNamespace string, +) (*rest.Config, error) { + return buildClusterConfig( + cluster, + fedClient, + restConfig, + fedSystemNamespace, + cluster.Spec.UseServiceAccountToken, + ) +} + +// BuildRawClusterConfig returns a restclient.Config built using key and certificate +// credentials from the secret referenced in the FederatedCluster. +func BuildRawClusterConfig( + cluster *fedcorev1a1.FederatedCluster, + fedClient kubernetes.Interface, + restConfig *rest.Config, + fedSystemNamespace string, +) (*rest.Config, error) { + return buildClusterConfig( + cluster, + fedClient, + restConfig, + fedSystemNamespace, + false, + ) +} + +func buildClusterConfig( + cluster *fedcorev1a1.FederatedCluster, + fedClient kubernetes.Interface, + restConfig *rest.Config, + fedSystemNamespace string, + useServiceAccountToken bool, +) (*rest.Config, error) { + apiEndpoint := cluster.Spec.APIEndpoint + if len(apiEndpoint) == 0 { + return nil, fmt.Errorf("api endpoint of cluster %s is empty", cluster.Name) + } + + clusterConfig, err := clientcmd.BuildConfigFromFlags(apiEndpoint, "") + if err != nil { + return nil, err + } + + clusterConfig.QPS = restConfig.QPS + clusterConfig.Burst = restConfig.Burst + clusterConfig.UserAgent = restConfig.UserAgent + + secretName := cluster.Spec.SecretRef.Name + if len(secretName) == 0 { + clusterConfig.CAFile = restConfig.CAFile + clusterConfig.CertFile = restConfig.CertFile + clusterConfig.KeyFile = restConfig.KeyFile + return clusterConfig, nil + } + + secret, err := fedClient.CoreV1().Secrets(fedSystemNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + err = PopulateAuthDetailsFromSecret(clusterConfig, cluster.Spec.Insecure, secret, useServiceAccountToken) + if err != nil { + return nil, fmt.Errorf("cannot build rest config from cluster secret: %w", err) + } + return clusterConfig, nil +} + +func PopulateAuthDetailsFromSecret( + clusterConfig *rest.Config, + insecure bool, + secret *corev1.Secret, + useServiceAccount bool, +) error { + var exists bool + + if useServiceAccount { + serviceAccountToken, exists := secret.Data[ServiceAccountTokenKey] + if !exists { + return fmt.Errorf("%q data is missing from secret", ServiceAccountTokenKey) + } + clusterConfig.BearerToken = string(serviceAccountToken) + + if insecure { + clusterConfig.Insecure = true + } else { + clusterConfig.CAData, exists = secret.Data[ServiceAccountCAKey] + if !exists { + return fmt.Errorf("%q data is missing from secret and insecure is false", ServiceAccountCAKey) + } + } + } else { + clusterConfig.CertData, exists = secret.Data[ClientCertificateKey] + if !exists { + return fmt.Errorf("%q data is missing from secret", ClientCertificateKey) + } + + clusterConfig.KeyData, exists = 
secret.Data[ClientKeyKey] + if !exists { + return fmt.Errorf("%q data is missing from secret", ClientKeyKey) + } + + if insecure { + clusterConfig.Insecure = true + } else { + clusterConfig.CAData, exists = secret.Data[CertificateAuthorityKey] + if !exists { + return fmt.Errorf("%q data is missing from secret", CertificateAuthorityKey) + } + } + } + + return nil +} + diff --git a/pkg/util/cluster/util.go b/pkg/util/cluster/util.go index 00734405..d7054feb 100644 --- a/pkg/util/cluster/util.go +++ b/pkg/util/cluster/util.go @@ -22,7 +22,6 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) - func IsClusterReady(clusterStatus *fedcorev1a1.FederatedClusterStatus) bool { for _, condition := range clusterStatus.Conditions { if condition.Type == fedcorev1a1.ClusterReady { From bc5b6b56825ad34b18bda16c41305b602eccc6b3 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 16:04:06 +0800 Subject: [PATCH 056/173] fix sample configs and update federatedobject crd --- ...ubeadmiral.io_clusterfederatedobjects.yaml | 3 - .../core.kubeadmiral.io_federatedobjects.yaml | 3 - config/sample/host/01-ftc.yaml | 360 ++---------------- .../core/v1alpha1/types_federatedobject.go | 9 +- 4 files changed, 29 insertions(+), 346 deletions(-) diff --git a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml index c89902e0..42c354c7 100644 --- a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml +++ b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml @@ -143,9 +143,6 @@ spec: to be propagated. x-kubernetes-preserve-unknown-fields: true required: - - follows - - overrides - - placements - template type: object status: diff --git a/config/crds/core.kubeadmiral.io_federatedobjects.yaml b/config/crds/core.kubeadmiral.io_federatedobjects.yaml index 46110d16..aa8e9cef 100644 --- a/config/crds/core.kubeadmiral.io_federatedobjects.yaml +++ b/config/crds/core.kubeadmiral.io_federatedobjects.yaml @@ -143,9 +143,6 @@ spec: to be propagated. 
x-kubernetes-preserve-unknown-fields: true required: - - follows - - overrides - - placements - template type: object status: diff --git a/config/sample/host/01-ftc.yaml b/config/sample/host/01-ftc.yaml index 0783449b..5c6c05db 100644 --- a/config/sample/host/01-ftc.yaml +++ b/config/sample/host/01-ftc.yaml @@ -4,22 +4,11 @@ kind: FederatedTypeConfig metadata: name: namespaces spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedNamespace - pluralName: federatednamespaces - scope: Cluster - version: v1alpha1 sourceType: kind: Namespace pluralName: namespaces scope: Cluster version: v1 - targetType: - kind: Namespace - pluralName: namespaces - scope: Cluster - version: v1 controllers: - - kubeadmiral.io/nsautoprop-controller - - kubeadmiral.io/overridepolicy-controller @@ -29,32 +18,16 @@ kind: FederatedTypeConfig metadata: name: configmaps spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedConfigMap - pluralName: federatedconfigmaps - scope: Namespaced - version: v1alpha1 - targetType: - kind: ConfigMap - pluralName: configmaps - scope: Namespaced - version: v1 sourceType: kind: ConfigMap pluralName: configmaps scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedConfigMapStatus - pluralName: federatedconfigmapstatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp --- @@ -63,32 +36,16 @@ kind: FederatedTypeConfig metadata: name: deployments.apps spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedDeployment - pluralName: federateddeployments - scope: Namespaced - version: v1alpha1 - targetType: - group: apps - kind: Deployment - pluralName: deployments - scope: Namespaced - version: v1 sourceType: group: apps kind: Deployment pluralName: deployments scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedDeploymentStatus - pluralName: federateddeploymentstatuses - scope: Namespaced - version: v1alpha1 - statusAggregation: Enabled - revisionHistory: Enabled + statusAggregation: + enabled: true + revisionHistory: + enabled: true autoMigration: enabled: true controllers: @@ -102,6 +59,7 @@ spec: availableReplicasStatus: status.availableReplicas readyReplicasStatus: status.readyReplicas statusCollection: + enabled: true fields: - metadata.creationTimestamp - spec.replicas @@ -112,32 +70,16 @@ kind: FederatedTypeConfig metadata: name: serviceaccounts spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedServiceAccount - pluralName: federatedserviceaccounts - scope: Namespaced - version: v1alpha1 - targetType: - kind: ServiceAccount - pluralName: serviceaccounts - scope: Namespaced - version: v1 sourceType: kind: ServiceAccount pluralName: serviceaccounts scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedServiceAccountStatus - pluralName: federatedserviceaccountstatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp --- @@ -146,32 +88,16 @@ kind: FederatedTypeConfig metadata: name: secrets spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedSecret - pluralName: federatedsecrets - scope: Namespaced - version: v1alpha1 - targetType: - kind: Secret - pluralName: secrets - scope: Namespaced 
- version: v1 sourceType: kind: Secret pluralName: secrets scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedSecretStatus - pluralName: federatedsecretstatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp --- @@ -180,32 +106,16 @@ kind: FederatedTypeConfig metadata: name: services spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedService - pluralName: federatedservices - scope: Namespaced - version: v1alpha1 - targetType: - kind: Service - pluralName: services - scope: Namespaced - version: v1 sourceType: kind: Service pluralName: services scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedServiceStatus - pluralName: federatedservicestatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - spec.clusterIP @@ -218,34 +128,17 @@ kind: FederatedTypeConfig metadata: name: storageclasses spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedStorageClass - pluralName: federatedstorageclasses - scope: Cluster - version: v1alpha1 - targetType: - group: storage.k8s.io - kind: StorageClass - pluralName: storageclasses - scope: Cluster - version: v1 sourceType: group: storage.k8s.io kind: StorageClass pluralName: storageclasses scope: Cluster version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedStorageClassStatus - pluralName: federatedstorageclassstatuses - scope: Cluster - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - provisioner @@ -256,32 +149,16 @@ kind: FederatedTypeConfig metadata: name: persistentvolumes spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedPersistentVolume - pluralName: federatedpersistentvolumes - scope: Cluster - version: v1alpha1 - targetType: - kind: PersistentVolume - pluralName: persistentvolumes - scope: Cluster - version: v1 sourceType: kind: PersistentVolume pluralName: persistentvolumes scope: Cluster version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedPersistentVolumeStatus - pluralName: federatedpersistentvolumestatuses - scope: Cluster - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - spec.accessModes @@ -295,32 +172,16 @@ kind: FederatedTypeConfig metadata: name: persistentvolumeclaims spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedPersistentVolumeClaim - pluralName: federatedpersistentvolumeclaims - scope: Namespaced - version: v1alpha1 - targetType: - kind: PersistentVolumeClaim - pluralName: persistentvolumeclaims - scope: Namespaced - version: v1 sourceType: kind: PersistentVolumeClaim pluralName: persistentvolumeclaims scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedPersistentVolumeClaimStatus - pluralName: federatedpersistentvolumeclaimstatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - 
metadata.creationTimestamp - spec.resources.requests @@ -332,18 +193,6 @@ kind: FederatedTypeConfig metadata: name: clusterrolebindings.rbac.authorization.k8s.io spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedClusterRoleBinding - pluralName: federatedclusterrolebindings - scope: Cluster - version: v1alpha1 - targetType: - group: rbac.authorization.k8s.io - kind: ClusterRoleBinding - pluralName: clusterrolebindings - scope: Cluster - version: v1 sourceType: group: rbac.authorization.k8s.io kind: ClusterRoleBinding @@ -359,18 +208,6 @@ kind: FederatedTypeConfig metadata: name: clusterroles.rbac.authorization.k8s.io spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedClusterRole - pluralName: federatedclusterroles - scope: Cluster - version: v1alpha1 - targetType: - group: rbac.authorization.k8s.io - kind: ClusterRole - pluralName: clusterroles - scope: Cluster - version: v1 sourceType: group: rbac.authorization.k8s.io kind: ClusterRole @@ -386,18 +223,6 @@ kind: FederatedTypeConfig metadata: name: rolebindings.rbac.authorization.k8s.io spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedRoleBinding - pluralName: federatedrolebindings - scope: Namespaced - version: v1alpha1 - targetType: - group: rbac.authorization.k8s.io - kind: RoleBinding - pluralName: rolebindings - scope: Namespaced - version: v1 sourceType: group: rbac.authorization.k8s.io kind: RoleBinding @@ -413,18 +238,6 @@ kind: FederatedTypeConfig metadata: name: roles.rbac.authorization.k8s.io spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedRole - pluralName: federatedroles - scope: Namespaced - version: v1alpha1 - targetType: - group: rbac.authorization.k8s.io - kind: Role - pluralName: roles - scope: Namespaced - version: v1 sourceType: group: rbac.authorization.k8s.io kind: Role @@ -440,36 +253,20 @@ kind: FederatedTypeConfig metadata: name: statefulsets.apps spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedStatefulSet - pluralName: federatedstatefulsets - scope: Namespaced - version: v1alpha1 - targetType: - group: apps - kind: StatefulSet - pluralName: statefulsets - scope: Namespaced - version: v1 sourceType: group: apps kind: StatefulSet pluralName: statefulsets scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedStatefulSetStatus - pluralName: federatedstatefulsetstatuses - scope: Namespaced - version: v1alpha1 - revisionHistory: Enabled + revisionHistory: + enabled: true controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - kubeadmiral.io/follower-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - spec.replicas @@ -480,36 +277,20 @@ kind: FederatedTypeConfig metadata: name: daemonsets.apps spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedDaemonSet - pluralName: federateddaemonsets - scope: Namespaced - version: v1alpha1 - targetType: - group: apps - kind: DaemonSet - pluralName: daemonsets - scope: Namespaced - version: v1 sourceType: group: apps kind: DaemonSet pluralName: daemonsets scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedDaemonSetStatus - pluralName: federateddaemonsetstatuses - scope: Namespaced - version: v1alpha1 - revisionHistory: Enabled + revisionHistory: + enabled: true controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - kubeadmiral.io/follower-controller statusCollection: + 
enabled: true fields: - metadata.creationTimestamp - status @@ -519,35 +300,18 @@ kind: FederatedTypeConfig metadata: name: jobs.batch spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedJob - pluralName: federatedjobs - scope: Namespaced - version: v1alpha1 - targetType: - group: batch - kind: Job - pluralName: jobs - scope: Namespaced - version: v1 sourceType: group: batch kind: Job pluralName: jobs scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedJobStatus - pluralName: federatedjobstatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - kubeadmiral.io/follower-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - status @@ -557,35 +321,18 @@ kind: FederatedTypeConfig metadata: name: cronjobs.batch spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedCronJob - pluralName: federatedcronjobs - scope: Namespaced - version: v1alpha1 - targetType: - group: batch - kind: CronJob - pluralName: cronjobs - scope: Namespaced - version: v1beta1 sourceType: group: batch kind: CronJob pluralName: cronjobs scope: Namespaced version: v1beta1 - statusType: - group: types.kubeadmiral.io - kind: FederatedCronJobStatus - pluralName: federatedcronjobstatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - kubeadmiral.io/follower-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - status @@ -595,34 +342,17 @@ kind: FederatedTypeConfig metadata: name: ingresses.networking.k8s.io spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedIngress - pluralName: federatedingresses - scope: Namespaced - version: v1alpha1 - targetType: - group: networking.k8s.io - kind: Ingress - pluralName: ingresses - scope: Namespaced - version: v1 sourceType: group: networking.k8s.io kind: Ingress pluralName: ingresses scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedIngressStatus - pluralName: federatedingressstatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - spec.rules @@ -633,17 +363,6 @@ kind: FederatedTypeConfig metadata: name: limitranges spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedLimitRange - pluralName: federatedlimitranges - scope: Namespaced - version: v1alpha1 - targetType: - kind: LimitRange - pluralName: limitranges - scope: Namespaced - version: v1 sourceType: kind: LimitRange pluralName: limitranges @@ -658,32 +377,16 @@ kind: FederatedTypeConfig metadata: name: resourcequotas spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedResourceQuota - pluralName: federatedresourcequotas - scope: Namespaced - version: v1alpha1 - targetType: - kind: ResourceQuota - pluralName: resourcequotas - scope: Namespaced - version: v1 sourceType: kind: ResourceQuota pluralName: resourcequotas scope: Namespaced version: v1 - statusType: - group: types.kubeadmiral.io - kind: FederatedResourceQuotaStatus - pluralName: federatedresourcequotastatuses - scope: Namespaced - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - status @@ 
-693,34 +396,17 @@ kind: FederatedTypeConfig metadata: name: customresourcedefinitions.apiextensions.k8s.io spec: - federatedType: - group: types.kubeadmiral.io - kind: FederatedCustomResourceDefinition - pluralName: federatedcustomresourcedefinitions - scope: Cluster - version: v1alpha1 - targetType: - kind: CustomResourceDefinition - pluralName: customresourcedefinitions - scope: Cluster - version: v1 - group: apiextensions.k8s.io sourceType: kind: CustomResourceDefinition pluralName: customresourcedefinitions scope: Cluster version: v1 group: apiextensions.k8s.io - statusType: - group: types.kubeadmiral.io - kind: FederatedCustomResourceDefinitionStatus - pluralName: federatedcustomresourcedefinitionstatuses - scope: Cluster - version: v1alpha1 controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller statusCollection: + enabled: true fields: - metadata.creationTimestamp - status diff --git a/pkg/apis/core/v1alpha1/types_federatedobject.go b/pkg/apis/core/v1alpha1/types_federatedobject.go index d8d35c7f..353cadf7 100644 --- a/pkg/apis/core/v1alpha1/types_federatedobject.go +++ b/pkg/apis/core/v1alpha1/types_federatedobject.go @@ -96,13 +96,16 @@ type GenericFederatedObjectSpec struct { Template apiextensionsv1.JSON `json:"template"` // Overrides describe the overrides that should be applied to the base template of the Kubernetes object before it // is propagated to individual member clusters. - Overrides []OverrideWithController `json:"overrides"` + // +optional + Overrides []OverrideWithController `json:"overrides,omitempty"` // Placements describe the member clusters that the Kubernetes object will be propagated to, which is a union of all // the listed clusters. - Placements []PlacementWithController `json:"placements"` + // +optional + Placements []PlacementWithController `json:"placements,omitempty"` // Follows defines other objects, or "leaders", that the Kubernetes object should follow during propagation, i.e. // the Kubernetes object should be propagated to all member clusters that its "leaders" are placed in. - Follows []LeaderReference `json:"follows"` + // +optional + Follows []LeaderReference `json:"follows,omitempty"` } // GenericFederatedObjectStatus describes the most recently observed status of a FederatedObject or ClusterFederatedObject. 
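With the three spec fields above now marked +optional and dropped from the CRDs' required lists, a template alone is enough for a valid FederatedObject or ClusterFederatedObject, presumably so the object can be created before the scheduler fills in placements. A minimal sketch of what this now permits, written against the v1alpha1 types shown in this patch; the ConfigMap template and the object names are illustrative stand-ins, not taken from the repository:

package main

import (
	"encoding/json"
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
)

func main() {
	// Hand-written stand-in for the raw template; in the controllers this is
	// produced from the source object rather than written by hand.
	rawTemplate, err := json.Marshal(map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "example", "namespace": "default"},
	})
	if err != nil {
		panic(err)
	}

	fedObj := &fedcorev1a1.FederatedObject{
		ObjectMeta: metav1.ObjectMeta{Name: "example-configmaps", Namespace: "default"},
		Spec: fedcorev1a1.GenericFederatedObjectSpec{
			// Template is the only required spec field after this change;
			// Overrides, Placements, and Follows are omitted entirely here.
			Template: apiextensionsv1.JSON{Raw: rawTemplate},
		},
	}
	fmt.Println(fedObj.Name)
}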
From 24656a371254bf0efedab104ad584b3faff10d88 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 16:06:31 +0800 Subject: [PATCH 057/173] update federate controller --- pkg/controllers/federate/controller.go | 96 ++++++++++++-------------- pkg/controllers/federate/util.go | 69 ++++++++++-------- 2 files changed, 83 insertions(+), 82 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 7aceefa0..4049357a 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -127,7 +127,7 @@ func NewFederateController( c.worker.Enqueue(workerKey{ name: uns.GetName(), namespace: uns.GetNamespace(), - ftc: ftc, + gvk: ftc.GetSourceTypeGVK(), }) }) }, @@ -137,7 +137,7 @@ func NewFederateController( if _, err := fedObjectInformer.Informer().AddEventHandler( eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { - fedObj := o.(*fedcorev1a1.GenericFederatedObject) + fedObj := o.(*fedcorev1a1.FederatedObject) logger := c.logger.WithValues("federated-object", common.NewQualifiedName(fedObj)) srcMeta, err := meta.GetSourceObjectMeta(fedObj) @@ -146,18 +146,12 @@ func NewFederateController( return } - gvk := srcMeta.GroupVersionKind - logger = logger.WithValues("gvk", gvk) - - ftc, exists := c.informerManager.GetResourceFTC(srcMeta.GroupVersionKind()) - if !exists { - logger.Error(nil, "Received event for FederatedObject without FederatedTypeConfig") - } + gvk := srcMeta.GroupVersionKind() c.worker.Enqueue(workerKey{ name: srcMeta.GetName(), namespace: srcMeta.GetNamespace(), - ftc: ftc, + gvk: gvk, }) }), ); err != nil { @@ -166,7 +160,7 @@ func NewFederateController( if _, err := clusterFedObjectInformer.Informer().AddEventHandler( eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { - fedObj := o.(*fedcorev1a1.GenericFederatedObject) + fedObj := o.(*fedcorev1a1.ClusterFederatedObject) logger := c.logger.WithValues("cluster-federated-object", common.NewQualifiedName(fedObj)) srcMeta, err := meta.GetSourceObjectMeta(fedObj) @@ -175,18 +169,12 @@ func NewFederateController( return } - gvk := srcMeta.GroupVersionKind - logger = logger.WithValues("gvk", gvk) - - ftc, exists := c.informerManager.GetResourceFTC(srcMeta.GroupVersionKind()) - if !exists { - logger.Error(nil, "Received event for ClusterFederatedObject without FederatedTypeConfig") - } + gvk := srcMeta.GroupVersionKind() c.worker.Enqueue(workerKey{ name: srcMeta.GetName(), namespace: srcMeta.GetNamespace(), - ftc: ftc, + gvk: gvk, }) }), ); err != nil { @@ -226,7 +214,19 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat logger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcile") }() - sourceGVR := key.ftc.GetSourceTypeGVR() + sourceGVK := key.gvk + ctx, logger = logging.InjectLoggerValues(ctx, "gvk", sourceGVK) + + ftc, exists := c.informerManager.GetResourceFTC(key.gvk) + if !exists { + logger.Error(nil, "FTC does not exist for GVK") + return worker.StatusError + } + ctx, logger = logging.InjectLoggerValues(ctx, "ftc", ftc.Name) + + sourceGVR := ftc.GetSourceTypeGVR() + ctx, logger = logging.InjectLoggerValues(ctx, "gvr", sourceGVR) + sourceObject, err := c.sourceObjectFromStore(key) if err != nil && apierrors.IsNotFound(err) { logger.V(3).Info(fmt.Sprintf("No source object for found, skip federating")) @@ -238,7 +238,15 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat } sourceObject = 
sourceObject.DeepCopy() - fedObject, err := c.federatedObjectFromStore(key) + fedObjectName := naming.GenerateFederatedObjectName(sourceObject.GetName(), ftc.Name) + ctx, logger = logging.InjectLoggerValues(ctx, "federated-object", fedObjectName) + + fedObject, err := fedobjectadapters.GetFromLister( + c.fedObjectInformer.Lister(), + c.clusterFedObjectInformer.Lister(), + sourceObject.GetNamespace(), + fedObjectName, + ) if err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to get federated object from store") return worker.StatusError @@ -246,7 +254,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat if apierrors.IsNotFound(err) { fedObject = nil } else { - fedObject = fedObject.DeepCopy() + fedObject = fedObject.DeepCopyGenericFederatedObject() } if sourceObject.GetDeletionTimestamp() != nil { @@ -278,7 +286,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat if fedObject == nil { logger.V(3).Info("No federated object found") - if err := c.handleCreateFederatedObject(ctx, key.ftc, sourceObject); err != nil { + if err := c.handleCreateFederatedObject(ctx, ftc, sourceObject); err != nil { logger.Error(err, "Failed to create federated object") c.eventRecorder.Eventf( sourceObject, @@ -305,7 +313,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat } logger.V(3).Info("Federated object already exists") - updated, err := c.handleExistingFederatedObject(ctx, key.ftc, sourceObject, fedObject) + updated, err := c.handleExistingFederatedObject(ctx, ftc, sourceObject, fedObject) if err != nil { logger.Error(err, "Failed to reconcile existing federated object") if apierrors.IsConflict(err) { @@ -316,7 +324,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat corev1.EventTypeWarning, EventReasonUpdateFederatedObject, "Failed to reconcile existing federated object %s: %v", - fedObject.Name, + fedObject.GetName(), err, ) return worker.StatusError @@ -327,7 +335,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat corev1.EventTypeNormal, EventReasonUpdateFederatedObject, "Federated object updated: %s", - fedObject.Name, + fedObject.GetName(), ) } else { logger.V(3).Info("No updates required to the federated object") @@ -337,39 +345,26 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat } func (c *FederateController) sourceObjectFromStore(key workerKey) (*unstructured.Unstructured, error) { - gvk := key.ftc.GetSourceTypeGVK() - - lister, hasSynced, exists := c.informerManager.GetResourceLister(gvk) + lister, hasSynced, exists := c.informerManager.GetResourceLister(key.gvk) if !exists { - return nil, fmt.Errorf("lister for %s does not exist", gvk) + return nil, fmt.Errorf("lister for %s does not exist", key.gvk) } if !hasSynced() { - return nil, fmt.Errorf("lister for %s not synced", gvk) + return nil, fmt.Errorf("lister for %s not synced", key.gvk) } var obj runtime.Object var err error - if key.ftc.GetNamespaced() { - obj, err = lister.ByNamespace(key.namespace).Get(key.name) - } else { + if key.namespace == "" { obj, err = lister.Get(key.name) + } else { + obj, err = lister.ByNamespace(key.namespace).Get(key.name) } return obj.(*unstructured.Unstructured), err } -func (c *FederateController) federatedObjectFromStore(key workerKey) (*fedcorev1a1.GenericFederatedObject, error) { - fedName := naming.GenerateFederatedObjectName(key.name, key.ftc.Name) - - return 
fedobjectadapters.GetFromLister( - c.fedObjectInformer.Lister(), - c.clusterFedObjectInformer.Lister(), - key.namespace, - fedName, - ) -} - func (c *FederateController) ensureFinalizer( ctx context.Context, sourceGVR schema.GroupVersionResource, @@ -432,7 +427,7 @@ func (c *FederateController) handleTerminatingSourceObject( ctx context.Context, sourceGVR schema.GroupVersionResource, sourceObject *unstructured.Unstructured, - fedObject *fedcorev1a1.GenericFederatedObject, + fedObject fedcorev1a1.GenericFederatedObject, ) error { logger := klog.FromContext(ctx) @@ -450,9 +445,8 @@ func (c *FederateController) handleTerminatingSourceObject( if err := fedobjectadapters.Delete( ctx, c.fedClient.CoreV1alpha1(), - c.fedClient.CoreV1alpha1(), - fedObject.Namespace, - fedObject.Name, + fedObject.GetNamespace(), + fedObject.GetName(), metav1.DeleteOptions{}, ); err != nil { return fmt.Errorf("failed to delete federated object: %w", err) @@ -484,7 +478,6 @@ func (c *FederateController) handleCreateFederatedObject( if _, err := fedobjectadapters.Create( ctx, c.fedClient.CoreV1alpha1(), - c.fedClient.CoreV1alpha1(), fedObject, metav1.CreateOptions{}, ); err != nil { @@ -498,7 +491,7 @@ func (c *FederateController) handleExistingFederatedObject( ctx context.Context, ftc *fedcorev1a1.FederatedTypeConfig, sourceObject *unstructured.Unstructured, - fedObject *fedcorev1a1.GenericFederatedObject, + fedObject fedcorev1a1.GenericFederatedObject, ) (bool, error) { logger := klog.FromContext(ctx) @@ -516,7 +509,6 @@ func (c *FederateController) handleExistingFederatedObject( if _, err = fedobjectadapters.Update( ctx, c.fedClient.CoreV1alpha1(), - c.fedClient.CoreV1alpha1(), fedObject, metav1.UpdateOptions{}, ); err != nil { diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index b7436f7b..04e1db9f 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -27,24 +27,25 @@ import ( "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" - "github.com/kubewharf/kubeadmiral/pkg/controllers/override" - "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" + + // "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" + // "github.com/kubewharf/kubeadmiral/pkg/controllers/override" + // "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" - "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/sourcefeedback" "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" ) type workerKey struct { name string namespace string - ftc *fedcorev1a1.FederatedTypeConfig + gvk schema.GroupVersionKind } func (k workerKey) String() string { @@ -71,8 +72,18 @@ func templateForSourceObject( return template } -func newFederatedObjectForSourceObject(ftc *fedcorev1a1.FederatedTypeConfig, sourceObj *unstructured.Unstructured) (*fedcorev1a1.GenericFederatedObject, error) { - fedObj := &fedcorev1a1.GenericFederatedObject{} +func newFederatedObjectForSourceObject( + ftc 
*fedcorev1a1.FederatedTypeConfig, + sourceObj *unstructured.Unstructured, +) (fedcorev1a1.GenericFederatedObject, error) { + var fedObj fedcorev1a1.GenericFederatedObject + + if sourceObj.GetNamespace() == "" { + fedObj = &fedcorev1a1.ClusterFederatedObject{} + } else { + fedObj = &fedcorev1a1.FederatedObject{} + } + fedName := naming.GenerateFederatedObjectName(sourceObj.GetName(), ftc.Name) fedObj.SetName(fedName) @@ -108,7 +119,9 @@ func newFederatedObjectForSourceObject(ftc *fedcorev1a1.FederatedTypeConfig, sou if err != nil { return nil, fmt.Errorf("failed to marshal template: %w", err) } - fedObj.Spec.Template.Raw = rawTemplate + fedObjSpec := &fedcorev1a1.GenericFederatedObjectSpec{} + fedObjSpec.Template.Raw = rawTemplate + fedObjSpec.DeepCopyInto(fedObj.GetSpec()) // Generate the JSON patch required to convert the source object to the FederatedObject's template and store it as // an annotation in the FederatedObject. @@ -130,7 +143,7 @@ func newFederatedObjectForSourceObject(ftc *fedcorev1a1.FederatedTypeConfig, sou func updateFederatedObjectForSourceObject( typeConfig *fedcorev1a1.FederatedTypeConfig, sourceObject *unstructured.Unstructured, - fedObject *fedcorev1a1.GenericFederatedObject, + fedObject fedcorev1a1.GenericFederatedObject, ) (bool, error) { isUpdated := false @@ -162,7 +175,7 @@ func updateFederatedObjectForSourceObject( targetTemplate := templateForSourceObject(sourceObject, templateAnnotations, templateLabels) foundTemplate := &unstructured.Unstructured{} - if err := json.Unmarshal(fedObject.Spec.Template.Raw, foundTemplate); err != nil { + if err := json.Unmarshal(fedObject.GetSpec().Template.Raw, foundTemplate); err != nil { return false, fmt.Errorf("failed to unmarshal template from federated object: %w", err) } if !reflect.DeepEqual(foundTemplate.Object, targetTemplate.Object) { @@ -171,7 +184,7 @@ func updateFederatedObjectForSourceObject( return false, fmt.Errorf("failed to marshal template: %w", err) } - fedObject.Spec.Template.Raw = rawTargetTemplate + fedObject.GetSpec().Template.Raw = rawTargetTemplate isUpdated = true } @@ -191,7 +204,6 @@ func updateFederatedObjectForSourceObject( observedAnnotationKeys := generateObservedKeys(sourceObject.GetAnnotations(), federatedAnnotations) observedLabelKeys := generateObservedKeys(sourceObject.GetLabels(), federatedLabels) - // Generate the JSON patch required to convert the source object to the FederatedObject's template and store it as // an annotation in the FederatedObject. 
@@ -230,18 +242,18 @@ func updateFederatedObjectForSourceObject( var ( // List of annotations that should be copied to the federated object instead of the template from the source federatedAnnotationSet = sets.New( - scheduler.SchedulingModeAnnotation, - scheduler.StickyClusterAnnotation, + // scheduler.SchedulingModeAnnotation, + // scheduler.StickyClusterAnnotation, util.ConflictResolutionAnnotation, - nsautoprop.NoAutoPropagationAnnotation, + // nsautoprop.NoAutoPropagationAnnotation, util.OrphanManagedResourcesAnnotation, - scheduler.TolerationsAnnotations, - scheduler.PlacementsAnnotations, - scheduler.ClusterSelectorAnnotations, - scheduler.AffinityAnnotations, - scheduler.MaxClustersAnnotations, + // scheduler.TolerationsAnnotations, + // scheduler.PlacementsAnnotations, + // scheduler.ClusterSelectorAnnotations, + // scheduler.AffinityAnnotations, + // scheduler.MaxClustersAnnotations, common.NoSchedulingAnnotation, - scheduler.FollowsObjectAnnotation, + // scheduler.FollowsObjectAnnotation, common.FollowersAnnotation, RetainReplicasAnnotation, ) @@ -250,19 +262,16 @@ var ( // List of annotations that should be ignored on the source object ignoredAnnotationSet = sets.New( util.LatestReplicasetDigestsAnnotation, - sourcefeedback.SchedulingAnnotation, - sourcefeedback.SyncingAnnotation, - sourcefeedback.StatusAnnotation, util.ConflictResolutionInternalAnnotation, util.OrphanManagedResourcesInternalAnnotation, common.EnableFollowerSchedulingAnnotation, ) - federatedLabelSet = sets.New( - scheduler.PropagationPolicyNameLabel, - scheduler.ClusterPropagationPolicyNameLabel, - override.OverridePolicyNameLabel, - override.ClusterOverridePolicyNameLabel, + federatedLabelSet = sets.New[string]( + // scheduler.PropagationPolicyNameLabel, + // scheduler.ClusterPropagationPolicyNameLabel, + // override.OverridePolicyNameLabel, + // override.ClusterOverridePolicyNameLabel, ) ) From dc2cba1d4b168bcd180563953e03b2c2fb84fe63 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 16:07:02 +0800 Subject: [PATCH 058/173] remove generic client --- pkg/client/generic/genericclient.go | 273 ---------------------------- 1 file changed, 273 deletions(-) delete mode 100644 pkg/client/generic/genericclient.go diff --git a/pkg/client/generic/genericclient.go b/pkg/client/generic/genericclient.go deleted file mode 100644 index f22ba80f..00000000 --- a/pkg/client/generic/genericclient.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. 
-*/ - -package generic - -import ( - "context" - "fmt" - "strings" - - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/kubewharf/kubeadmiral/pkg/client/generic/scheme" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/history" -) - -type Client interface { - Create(ctx context.Context, obj client.Object) error - Get(ctx context.Context, obj client.Object, namespace, name string) error - Update(ctx context.Context, obj client.Object) error - Delete(ctx context.Context, obj client.Object, namespace, name string, opts ...client.DeleteOption) error - List(ctx context.Context, obj client.ObjectList, namespace string) error - UpdateStatus(ctx context.Context, obj client.Object) error - Patch(ctx context.Context, obj client.Object, patch client.Patch) error - Rollback(ctx context.Context, obj client.Object, toRevision int64) error - DeleteHistory(ctx context.Context, obj client.Object) error - - ListWithOptions(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error -} - -type genericClient struct { - client client.Client -} - -func New(config *rest.Config) (Client, error) { - client, err := client.New(config, client.Options{Scheme: scheme.Scheme}) - if err != nil { - return nil, err - } - return &genericClient{client}, err -} - -func NewForConfigOrDie(config *rest.Config) Client { - client, err := New(config) - if err != nil { - panic(err) - } - return client -} - -func NewForConfigOrDieWithUserAgent(config *rest.Config, userAgent string) Client { - configCopy := rest.CopyConfig(config) - rest.AddUserAgent(configCopy, userAgent) - return NewForConfigOrDie(configCopy) -} - -func (c *genericClient) Create(ctx context.Context, obj client.Object) error { - return c.client.Create(ctx, obj) -} - -func (c *genericClient) Get(ctx context.Context, obj client.Object, namespace, name string) error { - return c.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, obj) -} - -func (c *genericClient) Update(ctx context.Context, obj client.Object) error { - return c.client.Update(ctx, obj) -} - -func (c *genericClient) Delete(ctx context.Context, obj client.Object, namespace, name string, opts ...client.DeleteOption) error { - accessor, err := meta.Accessor(obj) - if err != nil { - return err - } - if accessor == nil { - return fmt.Errorf("nil accessor for generic client") - } - accessor.SetNamespace(namespace) - accessor.SetName(name) - return c.client.Delete(ctx, obj, opts...) -} - -func (c *genericClient) List(ctx context.Context, obj client.ObjectList, namespace string) error { - return c.client.List(ctx, obj, client.InNamespace(namespace)) -} - -func (c *genericClient) ListWithOptions(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { - return c.client.List(ctx, obj, opts...) 
-} - -func (c *genericClient) UpdateStatus(ctx context.Context, obj client.Object) error { - return c.client.Status().Update(ctx, obj) -} - -func (c *genericClient) Patch(ctx context.Context, obj client.Object, patch client.Patch) error { - return c.client.Patch(ctx, obj, patch) -} - -// Rollback rollbacks federated Object such as FederatedDeployment -func (c *genericClient) Rollback(ctx context.Context, obj client.Object, toRevision int64) error { - if toRevision < 0 { - return fmt.Errorf("unable to find specified revision %v in history", toRevision) - } - if toRevision == 0 { - // try to get last revision from annotations, fallback to list all revisions on error - if err := c.rollbackToLastRevision(ctx, obj); err == nil { - return nil - } - } - - history, err := c.controlledHistory(ctx, obj) - if err != nil { - return fmt.Errorf("failed to list history: %s", err) - } - if toRevision == 0 && len(history) <= 1 { - return fmt.Errorf("no last revision to roll back to") - } - - toHistory := findHistory(toRevision, history) - if toHistory == nil { - return fmt.Errorf("unable to find specified revision %v in history", toHistory) - } - - // Restore revision - if err := c.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, toHistory.Data.Raw)); err != nil { - return fmt.Errorf("failed restoring revision %d: %v", toRevision, err) - } - return nil -} - -func (c *genericClient) rollbackToLastRevision(ctx context.Context, obj client.Object) error { - accessor, err := meta.Accessor(obj) - if err != nil { - return err - } - lastRevisionNameWithHash := accessor.GetAnnotations()[common.LastRevisionAnnotation] - if len(lastRevisionNameWithHash) == 0 { - return fmt.Errorf("annotation: %s not found", common.LastRevisionAnnotation) - } - - lastRevisionName, err := c.checkLastRevisionNameWithHash(lastRevisionNameWithHash, obj) - if err != nil { - return fmt.Errorf("failed to check last revision name, err: %v", err) - } - - latestRevision := &appsv1.ControllerRevision{} - if err := c.Get(ctx, latestRevision, accessor.GetNamespace(), lastRevisionName); err != nil { - return err - } - - // restore latest revision - if err := c.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, latestRevision.Data.Raw)); err != nil { - return fmt.Errorf("failed restoring latest revision: %v", err) - } - return nil -} - -func (c *genericClient) checkLastRevisionNameWithHash(lastRevisionNameWithHash string, obj client.Object) (string, error) { - parts := strings.Split(lastRevisionNameWithHash, "|") - if len(parts) != 2 { - return "", fmt.Errorf("invalid lastRevisionNameWithHash: %s", lastRevisionNameWithHash) - } - lastRevisionName, hash := parts[0], parts[1] - - utdObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return "", err - } - - template, ok, err := unstructured.NestedMap(utdObj, "spec", "template", "spec", "template") - if err != nil { - return "", err - } - if !ok { - return "", fmt.Errorf("spec.template.spec.template is not found, fedResource: %+v", obj) - } - - if templateHash := history.HashObject(template); templateHash != hash { - return "", fmt.Errorf("pod template hash: %s, last revision name suffix: %s, they should be equal", templateHash, hash) - } - return lastRevisionName, nil -} - -// controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor -func (c *genericClient) controlledHistory(ctx context.Context, obj client.Object) ([]*appsv1.ControllerRevision, error) { - accessor, err := meta.Accessor(obj) - if err != 
nil { - return nil, fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) - } - selector := labels.SelectorFromSet(labels.Set{ - "uid": string(accessor.GetUID()), - }) - - opt1 := client.InNamespace(accessor.GetNamespace()) - opt2 := client.MatchingLabelsSelector{Selector: selector} - historyList := &appsv1.ControllerRevisionList{} - if err := c.ListWithOptions(ctx, historyList, opt1, opt2); err != nil { - return nil, err - } - - var result []*appsv1.ControllerRevision - for i := range historyList.Items { - history := historyList.Items[i] - // Only add history that belongs to the API object - if metav1.IsControlledBy(&history, accessor) { - result = append(result, &history) - } - } - return result, nil -} - -func (c *genericClient) DeleteHistory(ctx context.Context, obj client.Object) error { - historyList, err := c.controlledHistory(ctx, obj) - if err != nil { - return err - } - for _, history := range historyList { - if err := c.Delete(ctx, history, history.Namespace, history.Name); err != nil { - return err - } - } - return nil -} - -// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions. -// It returns nil if no such controllerrevision exists. -// If toRevision is 0, the last previously used history is returned. -func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision { - if toRevision == 0 && len(allHistory) <= 1 { - return nil - } - - // Find the history to rollback to - var toHistory *appsv1.ControllerRevision - if toRevision == 0 { - // If toRevision == 0, find the latest revision (2nd max) - history.SortControllerRevisions(allHistory) - toHistory = allHistory[len(allHistory)-2] - } else { - for _, h := range allHistory { - if h.Revision == toRevision { - // If toRevision != 0, find the history with matching revision - return h - } - } - } - return toHistory -} From 5f4b53bc20a8be2dbe823165844566f7d4d92beb Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 16:08:07 +0800 Subject: [PATCH 059/173] fix naming mapping --- pkg/util/naming/naming.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/util/naming/naming.go b/pkg/util/naming/naming.go index 88667a09..6f32adc5 100644 --- a/pkg/util/naming/naming.go +++ b/pkg/util/naming/naming.go @@ -99,7 +99,16 @@ func transformObjectName(objectName string) (string, bool) { } } - return string(transformedName), transformed + // squash any run of consecutive '.' characters into a single '.' + sanitizedName := []byte{} + for i, ch := range transformedName { + if i != 0 && transformedName[i-1] == '.' && transformedName[i] == '.' 
{ + continue + } + sanitizedName = append(sanitizedName, ch) + } + + return string(sanitizedName), transformed } func fnvHashFunc(key string) uint32 { From b8add41315563da2c3a5a8921d51a5452e49fd4f Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 16:08:35 +0800 Subject: [PATCH 060/173] fix meta util package --- pkg/util/meta/federatedobject.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/util/meta/federatedobject.go b/pkg/util/meta/federatedobject.go index 3d31fffd..e4d76042 100644 --- a/pkg/util/meta/federatedobject.go +++ b/pkg/util/meta/federatedobject.go @@ -29,9 +29,9 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) -func GetSourceObjectMeta(fedObject *fedcorev1a1.GenericFederatedObject) (*metav1.PartialObjectMetadata, error) { +func GetSourceObjectMeta(fedObject fedcorev1a1.GenericFederatedObject) (*metav1.PartialObjectMetadata, error) { partialObjectMeta := &metav1.PartialObjectMetadata{} - if err := json.Unmarshal(fedObject.Spec.Template.Raw, partialObjectMeta); err != nil { + if err := json.Unmarshal(fedObject.GetSpec().Template.Raw, partialObjectMeta); err != nil { return nil, fmt.Errorf("failed to unmarshal FederatedObject's template: %w", err) } return partialObjectMeta, nil From 352864763a25d08ca050d8c81b7123fad9be6a80 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 16:22:02 +0800 Subject: [PATCH 061/173] revert commented code --- .../app/controllermanager.go | 6 +- cmd/controller-manager/app/core.go | 218 ++++++++---------- cmd/controller-manager/app/util.go | 7 +- pkg/controllers/federate/util.go | 32 +-- 4 files changed, 121 insertions(+), 142 deletions(-) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index 9ebec699..c92ac4ec 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -42,10 +42,10 @@ const ( ) var knownControllers = map[string]controllermanager.StartControllerFunc{ - // FederatedClusterControllerName: startFederatedClusterController, + FederatedClusterControllerName: startFederatedClusterController, FederateControllerName: startFederateController, - // MonitorControllerName: startMonitorController, - // FollowerControllerName: startFollowerController, + MonitorControllerName: startMonitorController, + FollowerControllerName: startFollowerController, } var controllersDisabledByDefault = sets.New(MonitorControllerName) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index d3630832..a23f901d 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,43 +20,43 @@ import ( "context" "fmt" - // "k8s.io/klog/v2" + "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - // "github.com/kubewharf/kubeadmiral/pkg/client/generic" + "github.com/kubewharf/kubeadmiral/pkg/client/generic" "github.com/kubewharf/kubeadmiral/pkg/controllermanager" - // "github.com/kubewharf/kubeadmiral/pkg/controllers/automigration" + "github.com/kubewharf/kubeadmiral/pkg/controllers/automigration" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" - // "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" - // "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedtypeconfig" + "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" + "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedtypeconfig" "github.com/kubewharf/kubeadmiral/pkg/controllers/follower" - // "github.com/kubewharf/kubeadmiral/pkg/controllers/monitor" + "github.com/kubewharf/kubeadmiral/pkg/controllers/monitor" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - // schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" + schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) -// func startFederatedClusterController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { -// clusterController, err := federatedcluster.NewFederatedClusterController( -// controllerCtx.FedClientset, -// controllerCtx.KubeClientset, -// controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), -// controllerCtx.FederatedClientFactory, -// controllerCtx.Metrics, -// controllerCtx.FedSystemNamespace, -// controllerCtx.RestConfig, -// controllerCtx.WorkerCount, -// controllerCtx.ComponentConfig.ClusterJoinTimeout, -// ) -// if err != nil { -// return nil, fmt.Errorf("error creating federated cluster controller: %w", err) -// } - -// go clusterController.Run(ctx) - -// return clusterController, nil -// } +func startFederatedClusterController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { + clusterController, err := federatedcluster.NewFederatedClusterController( + controllerCtx.FedClientset, + controllerCtx.KubeClientset, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), + controllerCtx.FederatedClientFactory, + controllerCtx.Metrics, + controllerCtx.FedSystemNamespace, + controllerCtx.RestConfig, + controllerCtx.WorkerCount, + controllerCtx.ComponentConfig.ClusterJoinTimeout, + ) + if err != nil { + return nil, fmt.Errorf("error creating federated cluster controller: %w", err) + } + + go clusterController.Run(ctx) + + return clusterController, nil +} func startFederateController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { federateController, err := federate.NewFederateController( @@ -79,22 +79,22 @@ func startFederateController(ctx context.Context, controllerCtx *controllerconte return federateController, nil } -//func startMonitorController(ctx context.Context, controllerCtx *controllercontext.Context) 
(controllermanager.Controller, error) { -// controllerConfig := controllerConfigFromControllerContext(controllerCtx) -// //nolint:contextcheck -// monitorController, err := monitor.NewMonitorController(controllerConfig) -// if err != nil { -// return nil, fmt.Errorf("error creating monitor controller: %w", err) -// } +func startMonitorController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { + controllerConfig := controllerConfigFromControllerContext(controllerCtx) + //nolint:contextcheck + monitorController, err := monitor.NewMonitorController(controllerConfig) + if err != nil { + return nil, fmt.Errorf("error creating monitor controller: %w", err) + } -// if err = monitorController.Run(ctx.Done()); err != nil { -// return nil, err -// } + if err = monitorController.Run(ctx.Done()); err != nil { + return nil, err + } -// return monitorController, nil -//} + return monitorController, nil +} -//nolint:contextcheck +// nolint:contextcheck func startFollowerController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { controller, err := follower.NewFollowerController( controllerCtx.KubeClientset, @@ -113,55 +113,37 @@ func startFollowerController(ctx context.Context, controllerCtx *controllerconte return controller, nil } -// TODO: remove this function once all controllers are fully refactored -func controllerConfigFromControllerContext(controllerCtx *controllercontext.Context) *util.ControllerConfig { - return &util.ControllerConfig{ - FederationNamespaces: util.FederationNamespaces{ - FedSystemNamespace: controllerCtx.FedSystemNamespace, - TargetNamespace: controllerCtx.TargetNamespace, - }, - KubeConfig: controllerCtx.RestConfig, - ClusterAvailableDelay: controllerCtx.ClusterAvailableDelay, - ClusterUnavailableDelay: controllerCtx.ClusterUnavailableDelay, - SkipAdoptingResources: true, - WorkerCount: controllerCtx.WorkerCount, - NamespaceAutoPropagationExcludeRegexp: controllerCtx.ComponentConfig.NSAutoPropExcludeRegexp, - CreateCrdForFtcs: controllerCtx.ComponentConfig.FederatedTypeConfigCreateCRDsForFTCs, - Metrics: controllerCtx.Metrics, +func startGlobalScheduler( + ctx context.Context, + controllerCtx *controllercontext.Context, + typeConfig *fedcorev1a1.FederatedTypeConfig, +) (controllermanager.Controller, error) { + federatedAPIResource := typeConfig.GetFederatedType() + federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) + + scheduler, err := scheduler.NewScheduler( + klog.FromContext(ctx), + typeConfig, + controllerCtx.KubeClientset, + controllerCtx.FedClientset, + controllerCtx.DynamicClientset, + controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), + controllerCtx.FedInformerFactory.Core().V1alpha1().PropagationPolicies(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterPropagationPolicies(), + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), + controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulingProfiles(), + controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulerPluginWebhookConfigurations(), + controllerCtx.Metrics, + controllerCtx.WorkerCount, + ) + if err != nil { + return nil, fmt.Errorf("error creating global scheduler: %w", err) } -} -// func startGlobalScheduler( -// ctx context.Context, -// controllerCtx *controllercontext.Context, -// typeConfig *fedcorev1a1.FederatedTypeConfig, -// ) (controllermanager.Controller, error) { -// federatedAPIResource := typeConfig.GetFederatedType() -// 
federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) - -// scheduler, err := scheduler.NewScheduler( -// klog.FromContext(ctx), -// typeConfig, -// controllerCtx.KubeClientset, -// controllerCtx.FedClientset, -// controllerCtx.DynamicClientset, -// controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), -// controllerCtx.FedInformerFactory.Core().V1alpha1().PropagationPolicies(), -// controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterPropagationPolicies(), -// controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), -// controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulingProfiles(), -// controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulerPluginWebhookConfigurations(), -// controllerCtx.Metrics, -// controllerCtx.WorkerCount, -// ) -// if err != nil { -// return nil, fmt.Errorf("error creating global scheduler: %w", err) -// } - -// go scheduler.Run(ctx) - -// return scheduler, nil -// } + go scheduler.Run(ctx) + + return scheduler, nil +} func isGlobalSchedulerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool { for _, controllerGroup := range typeConfig.GetControllers() { @@ -174,36 +156,36 @@ func isGlobalSchedulerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool return false } -//func startAutoMigrationController( -// ctx context.Context, -// controllerCtx *controllercontext.Context, -// typeConfig *fedcorev1a1.FederatedTypeConfig, -//) (controllermanager.Controller, error) { -// genericClient, err := generic.New(controllerCtx.RestConfig) -// if err != nil { -// return nil, fmt.Errorf("error creating generic client: %w", err) -// } - -// federatedAPIResource := typeConfig.GetFederatedType() -// federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) - -// //nolint:contextcheck -// controller, err := automigration.NewAutoMigrationController( -// controllerConfigFromControllerContext(controllerCtx), -// typeConfig, -// genericClient, -// controllerCtx.KubeClientset, -// controllerCtx.DynamicClientset.Resource(federatedGVR), -// controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), -// ) -// if err != nil { -// return nil, fmt.Errorf("error creating auto-migration controller: %w", err) -// } - -// go controller.Run(ctx) - -// return controller, nil -//} +func startAutoMigrationController( + ctx context.Context, + controllerCtx *controllercontext.Context, + typeConfig *fedcorev1a1.FederatedTypeConfig, +) (controllermanager.Controller, error) { + genericClient, err := generic.New(controllerCtx.RestConfig) + if err != nil { + return nil, fmt.Errorf("error creating generic client: %w", err) + } + + federatedAPIResource := typeConfig.GetFederatedType() + federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) + + //nolint:contextcheck + controller, err := automigration.NewAutoMigrationController( + controllerConfigFromControllerContext(controllerCtx), + typeConfig, + genericClient, + controllerCtx.KubeClientset, + controllerCtx.DynamicClientset.Resource(federatedGVR), + controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), + ) + if err != nil { + return nil, fmt.Errorf("error creating auto-migration controller: %w", err) + } + + go controller.Run(ctx) + + return controller, nil +} func isAutoMigrationControllerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool { return typeConfig.Spec.AutoMigration != nil && typeConfig.Spec.AutoMigration.Enabled diff --git a/cmd/controller-manager/app/util.go b/cmd/controller-manager/app/util.go index 3ea35de3..6ad431d0 100644 --- 
a/cmd/controller-manager/app/util.go +++ b/cmd/controller-manager/app/util.go @@ -35,7 +35,6 @@ import ( fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" "github.com/kubewharf/kubeadmiral/pkg/stats" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" @@ -44,9 +43,7 @@ import ( // KnownControllers returns all well known controller names func KnownControllers() []string { controllers := sets.StringKeySet(knownControllers) - ftcSubControllers := sets.StringKeySet(knownFTCSubControllers) - ret := controllers.Union(ftcSubControllers) - return ret.List() + return controllers.List() } // ControllersDisabledByDefault returns all controllers that are disabled by default @@ -107,7 +104,7 @@ func createControllerContext(opts *options.Options) (*controllercontext.Context, return nil, fmt.Errorf("failed to create fed clientset: %w", err) } - informerResyncPeriod := util.NoResyncPeriod + informerResyncPeriod := time.Duration(0) kubeInformerFactory := informers.NewSharedInformerFactory(kubeClientset, informerResyncPeriod) dynamicInformerFactory := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClientset, informerResyncPeriod) fedInformerFactory := fedinformers.NewSharedInformerFactory(fedClientset, informerResyncPeriod) diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 04e1db9f..6754c938 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -33,9 +33,9 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - // "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" - // "github.com/kubewharf/kubeadmiral/pkg/controllers/override" - // "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" + "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" + "github.com/kubewharf/kubeadmiral/pkg/controllers/override" + "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/util/naming" @@ -242,18 +242,18 @@ func updateFederatedObjectForSourceObject( var ( // List of annotations that should be copied to the federated object instead of the template from the source federatedAnnotationSet = sets.New( - // scheduler.SchedulingModeAnnotation, - // scheduler.StickyClusterAnnotation, + scheduler.SchedulingModeAnnotation, + scheduler.StickyClusterAnnotation, util.ConflictResolutionAnnotation, - // nsautoprop.NoAutoPropagationAnnotation, + nsautoprop.NoAutoPropagationAnnotation, util.OrphanManagedResourcesAnnotation, - // scheduler.TolerationsAnnotations, - // scheduler.PlacementsAnnotations, - // scheduler.ClusterSelectorAnnotations, - // scheduler.AffinityAnnotations, - // scheduler.MaxClustersAnnotations, + scheduler.TolerationsAnnotations, + scheduler.PlacementsAnnotations, + scheduler.ClusterSelectorAnnotations, + scheduler.AffinityAnnotations, + scheduler.MaxClustersAnnotations, common.NoSchedulingAnnotation, - // scheduler.FollowsObjectAnnotation, + scheduler.FollowsObjectAnnotation, common.FollowersAnnotation, RetainReplicasAnnotation, ) @@ -268,10 +268,10 @@ var ( ) 
federatedLabelSet = sets.New[string]( - // scheduler.PropagationPolicyNameLabel, - // scheduler.ClusterPropagationPolicyNameLabel, - // override.OverridePolicyNameLabel, - // override.ClusterOverridePolicyNameLabel, + scheduler.PropagationPolicyNameLabel, + scheduler.ClusterPropagationPolicyNameLabel, + override.OverridePolicyNameLabel, + override.ClusterOverridePolicyNameLabel, ) ) From 4c6df68010f4af186dc8c2eb2fa711094438a794 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 17:39:12 +0800 Subject: [PATCH 062/173] add exclude build tags --- .../app/controllermanager.go | 3 - cmd/controller-manager/app/core.go | 145 ------------------ pkg/controllers/automigration/controller.go | 1 + .../automigration/plugins/deployments.go | 1 + .../automigration/plugins/plugins.go | 1 + pkg/controllers/automigration/util.go | 1 + pkg/controllers/federate/controller.go | 4 +- pkg/controllers/federate/util.go | 11 +- .../federatedcluster/clusterjoin.go | 1 + .../federatedcluster/clusterstatus.go | 1 + .../federatedcluster/controller.go | 1 + pkg/controllers/federatedcluster/util.go | 1 + .../follower/bidirectional_cache.go | 1 + pkg/controllers/follower/controller.go | 1 + pkg/controllers/follower/util.go | 1 + pkg/controllers/monitor/monitor_controller.go | 1 + .../monitor/monitor_subcontroller.go | 1 + pkg/controllers/monitor/report.go | 1 + pkg/controllers/nsautoprop/constant.go | 21 +++ pkg/controllers/nsautoprop/controller.go | 2 +- pkg/controllers/override/constant.go | 24 +++ .../override/overridepolicy_controller.go | 4 +- pkg/controllers/override/util.go | 1 + pkg/controllers/policyrc/controller.go | 1 + pkg/controllers/policyrc/counter.go | 1 + .../scheduler/core/generic_scheduler.go | 1 + .../extensions/webhook/v1alpha1/adapter.go | 1 + .../extensions/webhook/v1alpha1/plugin.go | 1 + .../scheduler/framework/interface.go | 1 + .../plugins/apiresources/apiresources.go | 1 + .../clusteraffinity/cluster_affinity.go | 1 + .../clusterresources/balanced_allocation.go | 1 + .../framework/plugins/clusterresources/fit.go | 1 + .../clusterresources/least_allocated.go | 1 + .../clusterresources/most_allocated.go | 1 + .../plugins/maxcluster/max_cluster.go | 1 + .../framework/plugins/placement/filter.go | 1 + .../scheduler/framework/plugins/rsp/rsp.go | 1 + .../tainttoleration/taint_toleration.go | 1 + .../scheduler/framework/runtime/framework.go | 1 + .../scheduler/framework/runtime/registry.go | 1 + pkg/controllers/scheduler/framework/types.go | 1 + pkg/controllers/scheduler/framework/util.go | 1 + pkg/controllers/scheduler/handle.go | 1 + pkg/controllers/scheduler/profile.go | 1 + pkg/controllers/scheduler/scheduler.go | 1 + .../scheduler/schedulingtriggers.go | 1 + pkg/controllers/scheduler/schedulingunit.go | 1 + pkg/controllers/scheduler/util.go | 1 + pkg/controllers/scheduler/webhook.go | 1 + pkg/controllers/status/controller.go | 1 + .../statusaggregator/controller.go | 1 + .../statusaggregator/plugins/deployment.go | 1 + .../statusaggregator/plugins/job.go | 1 + .../statusaggregator/plugins/plugin.go | 1 + .../statusaggregator/plugins/pod.go | 1 + .../plugins/single_cluster_plugin.go | 1 + pkg/controllers/sync/accessor.go | 1 + pkg/controllers/sync/constant.go | 98 ++++++++++++ pkg/controllers/sync/controller.go | 1 + .../sync/dispatch/checkunmanaged.go | 1 + pkg/controllers/sync/dispatch/managed.go | 1 + pkg/controllers/sync/dispatch/operation.go | 1 + pkg/controllers/sync/dispatch/retain.go | 1 + pkg/controllers/sync/dispatch/unmanaged.go | 1 + pkg/controllers/sync/history.go | 1 
+ pkg/controllers/sync/placement.go | 1 + pkg/controllers/sync/resource.go | 1 + pkg/controllers/sync/status/status.go | 1 + pkg/controllers/sync/version/adapter.go | 1 + pkg/controllers/sync/version/cluster.go | 1 + pkg/controllers/sync/version/manager.go | 1 + pkg/controllers/sync/version/namespaced.go | 1 + pkg/controllers/util/adoptedannotation.go | 1 + .../util/cascadingdeleteannotation.go | 1 + pkg/controllers/util/clusterselector/util.go | 1 + pkg/controllers/util/clusterutil.go | 1 + .../util/conflictresolutionannotation.go | 1 + pkg/controllers/util/controllerconfig.go | 1 + .../util/federatedclient/client.go | 1 + .../util/federatedclient/interface.go | 1 + .../util/federatedclient/podinformer.go | 1 + pkg/controllers/util/federatedinformer.go | 1 + pkg/controllers/util/federatedstatus.go | 1 + pkg/controllers/util/genericinformer.go | 1 + pkg/controllers/util/hash/hash.go | 1 + .../util/history/controller_history.go | 1 + pkg/controllers/util/meta.go | 1 + pkg/controllers/util/orphaningannotation.go | 1 + pkg/controllers/util/overrides.go | 1 + pkg/controllers/util/planner/planner.go | 1 + pkg/controllers/util/propagatedversion.go | 1 + .../propagationstatus/propagationstatus.go | 1 + pkg/controllers/util/resourceclient.go | 1 + pkg/controllers/util/resourceinformer.go | 1 + pkg/controllers/util/rolloutplan.go | 1 + pkg/controllers/util/schema/apiresource.go | 1 + pkg/controllers/util/schema/gvk.go | 1 + .../util/sourcefeedback/scheduling.go | 70 --------- pkg/controllers/util/sourcefeedback/status.go | 72 --------- .../util/sourcefeedback/syncing.go | 91 ----------- pkg/controllers/util/sourcefeedback/util.go | 40 ----- pkg/controllers/util/store.go | 1 + .../util/unstructured/unstructured.go | 1 + .../util/eventsink/eventsink.go | 0 .../util/eventsink/eventsink_test.go | 0 .../util/finalizers/finalizers.go | 0 .../util/finalizers/finalizers_test.go | 0 108 files changed, 243 insertions(+), 433 deletions(-) create mode 100644 pkg/controllers/nsautoprop/constant.go create mode 100644 pkg/controllers/override/constant.go create mode 100644 pkg/controllers/sync/constant.go delete mode 100644 pkg/controllers/util/sourcefeedback/scheduling.go delete mode 100644 pkg/controllers/util/sourcefeedback/status.go delete mode 100644 pkg/controllers/util/sourcefeedback/syncing.go delete mode 100644 pkg/controllers/util/sourcefeedback/util.go rename pkg/{controllers => }/util/eventsink/eventsink.go (100%) rename pkg/{controllers => }/util/eventsink/eventsink_test.go (100%) rename pkg/{controllers => }/util/finalizers/finalizers.go (100%) rename pkg/{controllers => }/util/finalizers/finalizers_test.go (100%) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index c92ac4ec..87811a6a 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -42,10 +42,7 @@ const ( ) var knownControllers = map[string]controllermanager.StartControllerFunc{ - FederatedClusterControllerName: startFederatedClusterController, FederateControllerName: startFederateController, - MonitorControllerName: startMonitorController, - FollowerControllerName: startFollowerController, } var controllersDisabledByDefault = sets.New(MonitorControllerName) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index a23f901d..c4908fd8 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -20,44 +20,11 @@ import ( "context" "fmt" - "k8s.io/klog/v2" - 
- fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" "github.com/kubewharf/kubeadmiral/pkg/controllermanager" - "github.com/kubewharf/kubeadmiral/pkg/controllers/automigration" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" - "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" - "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedtypeconfig" - "github.com/kubewharf/kubeadmiral/pkg/controllers/follower" - "github.com/kubewharf/kubeadmiral/pkg/controllers/monitor" - "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) -func startFederatedClusterController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { - clusterController, err := federatedcluster.NewFederatedClusterController( - controllerCtx.FedClientset, - controllerCtx.KubeClientset, - controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), - controllerCtx.FederatedClientFactory, - controllerCtx.Metrics, - controllerCtx.FedSystemNamespace, - controllerCtx.RestConfig, - controllerCtx.WorkerCount, - controllerCtx.ComponentConfig.ClusterJoinTimeout, - ) - if err != nil { - return nil, fmt.Errorf("error creating federated cluster controller: %w", err) - } - - go clusterController.Run(ctx) - - return clusterController, nil -} - func startFederateController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { federateController, err := federate.NewFederateController( controllerCtx.KubeClientset, @@ -78,115 +45,3 @@ func startFederateController(ctx context.Context, controllerCtx *controllerconte return federateController, nil } - -func startMonitorController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { - controllerConfig := controllerConfigFromControllerContext(controllerCtx) - //nolint:contextcheck - monitorController, err := monitor.NewMonitorController(controllerConfig) - if err != nil { - return nil, fmt.Errorf("error creating monitor controller: %w", err) - } - - if err = monitorController.Run(ctx.Done()); err != nil { - return nil, err - } - - return monitorController, nil -} - -// nolint:contextcheck -func startFollowerController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { - controller, err := follower.NewFollowerController( - controllerCtx.KubeClientset, - controllerCtx.DynamicClientset, - controllerCtx.FedClientset, - controllerCtx.DynamicInformerFactory, - controllerCtx.Metrics, - controllerCtx.WorkerCount, - ) - if err != nil { - return nil, fmt.Errorf("error creating follower controller: %w", err) - } - - go controller.Run(ctx.Done()) - - return controller, nil -} - -func startGlobalScheduler( - ctx context.Context, - controllerCtx *controllercontext.Context, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) (controllermanager.Controller, error) { - federatedAPIResource := typeConfig.GetFederatedType() - federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) - - scheduler, err := scheduler.NewScheduler( - klog.FromContext(ctx), - typeConfig, - controllerCtx.KubeClientset, - controllerCtx.FedClientset, - controllerCtx.DynamicClientset, - 
controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), - controllerCtx.FedInformerFactory.Core().V1alpha1().PropagationPolicies(), - controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterPropagationPolicies(), - controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), - controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulingProfiles(), - controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulerPluginWebhookConfigurations(), - controllerCtx.Metrics, - controllerCtx.WorkerCount, - ) - if err != nil { - return nil, fmt.Errorf("error creating global scheduler: %w", err) - } - - go scheduler.Run(ctx) - - return scheduler, nil -} - -func isGlobalSchedulerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool { - for _, controllerGroup := range typeConfig.GetControllers() { - for _, controller := range controllerGroup { - if controller == scheduler.PrefixedGlobalSchedulerName { - return true - } - } - } - return false -} - -func startAutoMigrationController( - ctx context.Context, - controllerCtx *controllercontext.Context, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) (controllermanager.Controller, error) { - genericClient, err := generic.New(controllerCtx.RestConfig) - if err != nil { - return nil, fmt.Errorf("error creating generic client: %w", err) - } - - federatedAPIResource := typeConfig.GetFederatedType() - federatedGVR := schemautil.APIResourceToGVR(&federatedAPIResource) - - //nolint:contextcheck - controller, err := automigration.NewAutoMigrationController( - controllerConfigFromControllerContext(controllerCtx), - typeConfig, - genericClient, - controllerCtx.KubeClientset, - controllerCtx.DynamicClientset.Resource(federatedGVR), - controllerCtx.DynamicInformerFactory.ForResource(federatedGVR), - ) - if err != nil { - return nil, fmt.Errorf("error creating auto-migration controller: %w", err) - } - - go controller.Run(ctx) - - return controller, nil -} - -func isAutoMigrationControllerEnabled(typeConfig *fedcorev1a1.FederatedTypeConfig) bool { - return typeConfig.Spec.AutoMigration != nil && typeConfig.Spec.AutoMigration.Enabled -} diff --git a/pkg/controllers/automigration/controller.go b/pkg/controllers/automigration/controller.go index 2a05b4ce..bc08257c 100644 --- a/pkg/controllers/automigration/controller.go +++ b/pkg/controllers/automigration/controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/automigration/plugins/deployments.go b/pkg/controllers/automigration/plugins/deployments.go index 86724035..5e3a6b8b 100644 --- a/pkg/controllers/automigration/plugins/deployments.go +++ b/pkg/controllers/automigration/plugins/deployments.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/automigration/plugins/plugins.go b/pkg/controllers/automigration/plugins/plugins.go index ee77b266..4699709f 100644 --- a/pkg/controllers/automigration/plugins/plugins.go +++ b/pkg/controllers/automigration/plugins/plugins.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/automigration/util.go b/pkg/controllers/automigration/util.go index 9611e1b0..55a20a94 100644 --- a/pkg/controllers/automigration/util.go +++ b/pkg/controllers/automigration/util.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
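A note on the mechanism this patch applies to each file below: //go:build exclude is an ordinary build constraint, and because nothing in the module ever sets an "exclude" tag, every file carrying it is compiled out of normal builds until the line is removed; presumably this keeps the not-yet-migrated controllers in-tree without breaking the build. A minimal sketch of the effect, as a standalone scratch file (not from this repository):

    //go:build exclude

    // This file is skipped by a plain `go build ./...` because the "exclude"
    // tag is never set; it would only compile with `go build -tags exclude`.
    package scratch

    func onlyBuiltWithExcludeTag() string { return "excluded by default" }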
diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 4049357a..602b4593 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -39,8 +39,8 @@ import ( fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" - finalizersutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + finalizersutil "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/stats" "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 6754c938..0076e2ce 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -32,11 +32,11 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + "github.com/kubewharf/kubeadmiral/pkg/controllers/sync" "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/util/naming" "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" @@ -244,9 +244,9 @@ var ( federatedAnnotationSet = sets.New( scheduler.SchedulingModeAnnotation, scheduler.StickyClusterAnnotation, - util.ConflictResolutionAnnotation, nsautoprop.NoAutoPropagationAnnotation, - util.OrphanManagedResourcesAnnotation, + sync.OrphanManagedResourcesAnnotation, + sync.ConflictResolutionAnnotation, scheduler.TolerationsAnnotations, scheduler.PlacementsAnnotations, scheduler.ClusterSelectorAnnotations, @@ -261,9 +261,8 @@ var ( // TODO: Do we need to specify the internal annotations here? // List of annotations that should be ignored on the source object ignoredAnnotationSet = sets.New( - util.LatestReplicasetDigestsAnnotation, - util.ConflictResolutionInternalAnnotation, - util.OrphanManagedResourcesInternalAnnotation, + sync.ConflictResolutionInternalAnnotation, + sync.OrphanManagedResourcesInternalAnnotation, common.EnableFollowerSchedulingAnnotation, ) diff --git a/pkg/controllers/federatedcluster/clusterjoin.go b/pkg/controllers/federatedcluster/clusterjoin.go index cb02f662..7b54790e 100644 --- a/pkg/controllers/federatedcluster/clusterjoin.go +++ b/pkg/controllers/federatedcluster/clusterjoin.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/federatedcluster/clusterstatus.go b/pkg/controllers/federatedcluster/clusterstatus.go index 8d361b97..87adca6f 100644 --- a/pkg/controllers/federatedcluster/clusterstatus.go +++ b/pkg/controllers/federatedcluster/clusterstatus.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2016 The Kubernetes Authors. 
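Per the comments in the federate/util.go hunk above, annotations in federatedAnnotationSet are copied onto the federated object itself rather than into its embedded template, while ignoredAnnotationSet entries are dropped from the source. A toy sketch of that three-way split (my own illustration; the real updateFederatedObjectForSourceObject is more involved, and the "internal-only" key is hypothetical):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/sets"
    )

    // splitAnnotations sends each source annotation to the federated object,
    // to the embedded template, or drops it, based on the two sets.
    func splitAnnotations(
        src map[string]string,
        federated, ignored sets.Set[string],
    ) (onFedObject, onTemplate map[string]string) {
        onFedObject = map[string]string{}
        onTemplate = map[string]string{}
        for k, v := range src {
            switch {
            case ignored.Has(k):
                // dropped entirely
            case federated.Has(k):
                onFedObject[k] = v
            default:
                onTemplate[k] = v
            }
        }
        return onFedObject, onTemplate
    }

    func main() {
        federated := sets.New("kubeadmiral.io/conflict-resolution") // assumed prefix
        ignored := sets.New("kubeadmiral.io/internal-only")         // hypothetical key
        onFed, onTpl := splitAnnotations(map[string]string{
            "kubeadmiral.io/conflict-resolution": "adopt",
            "kubeadmiral.io/internal-only":       "x",
            "app.kubernetes.io/name":             "demo",
        }, federated, ignored)
        fmt.Println(onFed) // map[kubeadmiral.io/conflict-resolution:adopt]
        fmt.Println(onTpl) // map[app.kubernetes.io/name:demo]
    }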
diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index b216ace2..d0c999ba 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2016 The Kubernetes Authors. diff --git a/pkg/controllers/federatedcluster/util.go b/pkg/controllers/federatedcluster/util.go index e21ab7ae..32ff9a95 100644 --- a/pkg/controllers/federatedcluster/util.go +++ b/pkg/controllers/federatedcluster/util.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/follower/bidirectional_cache.go b/pkg/controllers/follower/bidirectional_cache.go index 394b8576..1ea4af86 100644 --- a/pkg/controllers/follower/bidirectional_cache.go +++ b/pkg/controllers/follower/bidirectional_cache.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/follower/controller.go b/pkg/controllers/follower/controller.go index 25d668c3..009a4c5c 100644 --- a/pkg/controllers/follower/controller.go +++ b/pkg/controllers/follower/controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/follower/util.go b/pkg/controllers/follower/util.go index db356ac7..99b32364 100644 --- a/pkg/controllers/follower/util.go +++ b/pkg/controllers/follower/util.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/monitor/monitor_controller.go b/pkg/controllers/monitor/monitor_controller.go index e19d82b2..c258cf0a 100644 --- a/pkg/controllers/monitor/monitor_controller.go +++ b/pkg/controllers/monitor/monitor_controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/monitor/monitor_subcontroller.go b/pkg/controllers/monitor/monitor_subcontroller.go index 8ced928f..831ee62d 100644 --- a/pkg/controllers/monitor/monitor_subcontroller.go +++ b/pkg/controllers/monitor/monitor_subcontroller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/monitor/report.go b/pkg/controllers/monitor/report.go index f1de7c0e..9822eeab 100644 --- a/pkg/controllers/monitor/report.go +++ b/pkg/controllers/monitor/report.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/nsautoprop/constant.go b/pkg/controllers/nsautoprop/constant.go new file mode 100644 index 00000000..3f5da374 --- /dev/null +++ b/pkg/controllers/nsautoprop/constant.go @@ -0,0 +1,21 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nsautoprop + +import "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + +var NoAutoPropagationAnnotation = common.DefaultPrefix + "no-auto-propagation" diff --git a/pkg/controllers/nsautoprop/controller.go b/pkg/controllers/nsautoprop/controller.go index 830e5c46..56658cb2 100644 --- a/pkg/controllers/nsautoprop/controller.go +++ b/pkg/controllers/nsautoprop/controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -57,7 +58,6 @@ const ( NamespaceAutoPropagationControllerName = "nsautoprop-controller" PrefixedNamespaceAutoPropagationControllerName = common.DefaultPrefix + NamespaceAutoPropagationControllerName EventReasonNamespaceAutoPropagation = "NamespaceAutoPropagation" - NoAutoPropagationAnnotation = common.DefaultPrefix + "no-auto-propagation" ) /* diff --git a/pkg/controllers/override/constant.go b/pkg/controllers/override/constant.go new file mode 100644 index 00000000..fdd1be32 --- /dev/null +++ b/pkg/controllers/override/constant.go @@ -0,0 +1,24 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package override + +import "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + +var ( + OverridePolicyNameLabel = common.DefaultPrefix + "override-policy-name" + ClusterOverridePolicyNameLabel = common.DefaultPrefix + "cluster-override-policy-name" +) diff --git a/pkg/controllers/override/overridepolicy_controller.go b/pkg/controllers/override/overridepolicy_controller.go index b559f813..f1ad8a5d 100644 --- a/pkg/controllers/override/overridepolicy_controller.go +++ b/pkg/controllers/override/overridepolicy_controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -49,9 +50,6 @@ const ( EventReasonMatchOverridePolicyFailed = "MatchOverridePolicyFailed" EventReasonParseOverridePolicyFailed = "ParseOverridePolicyFailed" EventReasonOverridePolicyApplied = "OverridePolicyApplied" - - OverridePolicyNameLabel = common.DefaultPrefix + "override-policy-name" - ClusterOverridePolicyNameLabel = common.DefaultPrefix + "cluster-override-policy-name" ) var PrefixedControllerName = common.DefaultPrefix + ControllerName diff --git a/pkg/controllers/override/util.go b/pkg/controllers/override/util.go index c083b064..e9b44ead 100644 --- a/pkg/controllers/override/util.go +++ b/pkg/controllers/override/util.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/policyrc/controller.go b/pkg/controllers/policyrc/controller.go index 7bcbffb4..f894d772 100644 --- a/pkg/controllers/policyrc/controller.go +++ b/pkg/controllers/policyrc/controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/policyrc/counter.go b/pkg/controllers/policyrc/counter.go index 641d5891..f17f8427 100644 --- a/pkg/controllers/policyrc/counter.go +++ b/pkg/controllers/policyrc/counter.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
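The label keys split out into override/constant.go above are user-facing: placing one of them on a source object is how that object selects an OverridePolicy or ClusterOverridePolicy. A hedged sketch of what that looks like on a Deployment, assuming common.DefaultPrefix is "kubeadmiral.io/" (the prefix value is not shown in this patch):

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        deploy := &appsv1.Deployment{
            ObjectMeta: metav1.ObjectMeta{
                Name: "demo",
                Labels: map[string]string{
                    // Assumes common.DefaultPrefix == "kubeadmiral.io/".
                    "kubeadmiral.io/override-policy-name": "my-override-policy",
                },
            },
        }
        fmt.Println(deploy.Labels)
    }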
diff --git a/pkg/controllers/scheduler/core/generic_scheduler.go b/pkg/controllers/scheduler/core/generic_scheduler.go index 12acaa85..dfd04a0b 100644 --- a/pkg/controllers/scheduler/core/generic_scheduler.go +++ b/pkg/controllers/scheduler/core/generic_scheduler.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2014 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go index c7c7a811..9b964747 100644 --- a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go +++ b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/plugin.go b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/plugin.go index ea1da454..78b78979 100644 --- a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/plugin.go +++ b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/plugin.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/framework/interface.go b/pkg/controllers/scheduler/framework/interface.go index 50165e89..3052b1cc 100644 --- a/pkg/controllers/scheduler/framework/interface.go +++ b/pkg/controllers/scheduler/framework/interface.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/apiresources/apiresources.go b/pkg/controllers/scheduler/framework/plugins/apiresources/apiresources.go index 71f23d1c..a949e86f 100644 --- a/pkg/controllers/scheduler/framework/plugins/apiresources/apiresources.go +++ b/pkg/controllers/scheduler/framework/plugins/apiresources/apiresources.go @@ -1,3 +1,4 @@ +//go:build exclude // The design of this plugin is heavily inspired by karmada-scheduler. Kudos! package apiresources diff --git a/pkg/controllers/scheduler/framework/plugins/clusteraffinity/cluster_affinity.go b/pkg/controllers/scheduler/framework/plugins/clusteraffinity/cluster_affinity.go index 978391f3..e034f79c 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusteraffinity/cluster_affinity.go +++ b/pkg/controllers/scheduler/framework/plugins/clusteraffinity/cluster_affinity.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/balanced_allocation.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/balanced_allocation.go index 46e41d80..a99d2a6d 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/balanced_allocation.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/balanced_allocation.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go index 0310205b..114926a6 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. 
diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/least_allocated.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/least_allocated.go index 1eca9f16..38d365bf 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/least_allocated.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/least_allocated.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/most_allocated.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/most_allocated.go index 586455ab..6f5b830a 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/most_allocated.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/most_allocated.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/maxcluster/max_cluster.go b/pkg/controllers/scheduler/framework/plugins/maxcluster/max_cluster.go index 7a1691de..08ca56f1 100644 --- a/pkg/controllers/scheduler/framework/plugins/maxcluster/max_cluster.go +++ b/pkg/controllers/scheduler/framework/plugins/maxcluster/max_cluster.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/placement/filter.go b/pkg/controllers/scheduler/framework/plugins/placement/filter.go index 35748d7d..19c3253b 100644 --- a/pkg/controllers/scheduler/framework/plugins/placement/filter.go +++ b/pkg/controllers/scheduler/framework/plugins/placement/filter.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/rsp/rsp.go b/pkg/controllers/scheduler/framework/plugins/rsp/rsp.go index 29694023..50b94495 100644 --- a/pkg/controllers/scheduler/framework/plugins/rsp/rsp.go +++ b/pkg/controllers/scheduler/framework/plugins/rsp/rsp.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/pkg/controllers/scheduler/framework/plugins/tainttoleration/taint_toleration.go index dae64acf..80571411 100644 --- a/pkg/controllers/scheduler/framework/plugins/tainttoleration/taint_toleration.go +++ b/pkg/controllers/scheduler/framework/plugins/tainttoleration/taint_toleration.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/runtime/framework.go b/pkg/controllers/scheduler/framework/runtime/framework.go index 8da7e857..3cf2bee7 100644 --- a/pkg/controllers/scheduler/framework/runtime/framework.go +++ b/pkg/controllers/scheduler/framework/runtime/framework.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/runtime/registry.go b/pkg/controllers/scheduler/framework/runtime/registry.go index b8970bed..278ed0d2 100644 --- a/pkg/controllers/scheduler/framework/runtime/registry.go +++ b/pkg/controllers/scheduler/framework/runtime/registry.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. 
diff --git a/pkg/controllers/scheduler/framework/types.go b/pkg/controllers/scheduler/framework/types.go index 3b1abbde..b8b8a212 100644 --- a/pkg/controllers/scheduler/framework/types.go +++ b/pkg/controllers/scheduler/framework/types.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/util.go b/pkg/controllers/scheduler/framework/util.go index a12dedd9..165e73a0 100644 --- a/pkg/controllers/scheduler/framework/util.go +++ b/pkg/controllers/scheduler/framework/util.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2015 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/handle.go b/pkg/controllers/scheduler/handle.go index 6b99558e..84589766 100644 --- a/pkg/controllers/scheduler/handle.go +++ b/pkg/controllers/scheduler/handle.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/profile.go b/pkg/controllers/scheduler/profile.go index 3c6f8c44..811d9325 100644 --- a/pkg/controllers/scheduler/profile.go +++ b/pkg/controllers/scheduler/profile.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index 87ea260e..b62cf5a2 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/schedulingtriggers.go b/pkg/controllers/scheduler/schedulingtriggers.go index 75437ace..e1e1bab8 100644 --- a/pkg/controllers/scheduler/schedulingtriggers.go +++ b/pkg/controllers/scheduler/schedulingtriggers.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/schedulingunit.go b/pkg/controllers/scheduler/schedulingunit.go index 06eb401f..6e2f3cb6 100644 --- a/pkg/controllers/scheduler/schedulingunit.go +++ b/pkg/controllers/scheduler/schedulingunit.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/util.go b/pkg/controllers/scheduler/util.go index 17169de1..6172c02b 100644 --- a/pkg/controllers/scheduler/util.go +++ b/pkg/controllers/scheduler/util.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/webhook.go b/pkg/controllers/scheduler/webhook.go index f287d206..ee978c51 100644 --- a/pkg/controllers/scheduler/webhook.go +++ b/pkg/controllers/scheduler/webhook.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index 3821ec7a..dc42fb2f 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/statusaggregator/controller.go b/pkg/controllers/statusaggregator/controller.go index 4484e6d7..46c96dd4 100644 --- a/pkg/controllers/statusaggregator/controller.go +++ b/pkg/controllers/statusaggregator/controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
diff --git a/pkg/controllers/statusaggregator/plugins/deployment.go b/pkg/controllers/statusaggregator/plugins/deployment.go index d04ee064..c30371eb 100644 --- a/pkg/controllers/statusaggregator/plugins/deployment.go +++ b/pkg/controllers/statusaggregator/plugins/deployment.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/statusaggregator/plugins/job.go b/pkg/controllers/statusaggregator/plugins/job.go index d16af3ae..aad89156 100644 --- a/pkg/controllers/statusaggregator/plugins/job.go +++ b/pkg/controllers/statusaggregator/plugins/job.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/statusaggregator/plugins/plugin.go b/pkg/controllers/statusaggregator/plugins/plugin.go index f9d931a6..13a722b6 100644 --- a/pkg/controllers/statusaggregator/plugins/plugin.go +++ b/pkg/controllers/statusaggregator/plugins/plugin.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/statusaggregator/plugins/pod.go b/pkg/controllers/statusaggregator/plugins/pod.go index 95f18ee3..da045563 100644 --- a/pkg/controllers/statusaggregator/plugins/pod.go +++ b/pkg/controllers/statusaggregator/plugins/pod.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/statusaggregator/plugins/single_cluster_plugin.go b/pkg/controllers/statusaggregator/plugins/single_cluster_plugin.go index 1db89f9b..60ab9c67 100644 --- a/pkg/controllers/statusaggregator/plugins/single_cluster_plugin.go +++ b/pkg/controllers/statusaggregator/plugins/single_cluster_plugin.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index a1ceca7d..236f2935 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/constant.go b/pkg/controllers/sync/constant.go new file mode 100644 index 00000000..6dfd1765 --- /dev/null +++ b/pkg/controllers/sync/constant.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This file may have been modified by The KubeAdmiral Authors +("KubeAdmiral Modifications"). All KubeAdmiral Modifications +are Copyright 2023 The KubeAdmiral Authors. 
+*/ + +package sync + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" +) + +type ConflictResolution string + +type OrphanManagedResourcesBehavior string + +const ( + ConflictResolutionAnnotation = common.DefaultPrefix + "conflict-resolution" + ConflictResolutionInternalAnnotation = common.InternalPrefix + "conflict-resolution" + AdoptedAnnotation = common.DefaultPrefix + "adopted" + // If this annotation is present on a federated resource, it controls the + // manner in which resources in the member clusters are orphaned when the + // federated resource is deleted. + // If the annotation is not present (the default), resources in member + // clusters will be deleted before the federated resource is deleted. + OrphanManagedResourcesAnnotation = common.DefaultPrefix + "orphan" + OrphanManagedResourcesInternalAnnotation = common.InternalPrefix + "orphan" + + // Conflict resolution for preexisting resources + ConflictResolutionAdopt ConflictResolution = "adopt" + + // Orphan all managed resources + OrphanManagedResourcesAll OrphanManagedResourcesBehavior = "all" + // Orphan only the adopted resources + OrphanManagedResourcesAdopted OrphanManagedResourcesBehavior = "adopted" + // Orphaning disabled, delete managed resources + OrphanManagedResourcesNone OrphanManagedResourcesBehavior = "" +) + +func ShouldAdoptPreexistingResources(obj *unstructured.Unstructured) bool { + annotations := obj.GetAnnotations() + + value, exists := annotations[ConflictResolutionInternalAnnotation] + if !exists { + value = annotations[ConflictResolutionAnnotation] + } + + return value == string(ConflictResolutionAdopt) +} + +func HasAdoptedAnnotation(obj *unstructured.Unstructured) bool { + annotations := obj.GetAnnotations() + if annotations == nil { + return false + } + return annotations[AdoptedAnnotation] == common.AnnotationValueTrue +} + +func RemoveAdoptedAnnotation(obj *unstructured.Unstructured) { + annotations := obj.GetAnnotations() + if annotations == nil || annotations[AdoptedAnnotation] != common.AnnotationValueTrue { + return + } + delete(annotations, AdoptedAnnotation) + obj.SetAnnotations(annotations) +} + +func GetOrphaningBehavior(obj *unstructured.Unstructured) OrphanManagedResourcesBehavior { + annotations := obj.GetAnnotations() + + value, exists := annotations[OrphanManagedResourcesInternalAnnotation] + if !exists { + value = annotations[OrphanManagedResourcesAnnotation] + } + + switch value { + case string(OrphanManagedResourcesAll), string(OrphanManagedResourcesAdopted): + return (OrphanManagedResourcesBehavior)(value) + default: + return OrphanManagedResourcesNone + } +} diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 2c81b6b8..fbb34e19 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/sync/dispatch/checkunmanaged.go b/pkg/controllers/sync/dispatch/checkunmanaged.go index 6f3a570f..b0a3a894 100644 --- a/pkg/controllers/sync/dispatch/checkunmanaged.go +++ b/pkg/controllers/sync/dispatch/checkunmanaged.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. 
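To make the new sync constants concrete: in GetOrphaningBehavior and ShouldAdoptPreexistingResources above, the internal annotation variant takes precedence over the user-facing one, and any unrecognized orphaning value falls back to OrphanManagedResourcesNone (delete managed resources rather than orphan them). A small usage sketch (my own example; it assumes common.DefaultPrefix is "kubeadmiral.io/", which is not shown in this patch):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

        "github.com/kubewharf/kubeadmiral/pkg/controllers/sync"
    )

    func main() {
        obj := &unstructured.Unstructured{Object: map[string]interface{}{}}
        obj.SetAnnotations(map[string]string{
            // Assumes common.DefaultPrefix == "kubeadmiral.io/".
            "kubeadmiral.io/orphan":              "adopted",
            "kubeadmiral.io/conflict-resolution": "adopt",
        })

        // "adopted": only resources adopted by the sync controller are orphaned.
        fmt.Println(sync.GetOrphaningBehavior(obj))
        // true: preexisting resources in member clusters may be adopted.
        fmt.Println(sync.ShouldAdoptPreexistingResources(obj))
    }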
diff --git a/pkg/controllers/sync/dispatch/managed.go b/pkg/controllers/sync/dispatch/managed.go index fa4dff82..a8dca909 100644 --- a/pkg/controllers/sync/dispatch/managed.go +++ b/pkg/controllers/sync/dispatch/managed.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/dispatch/operation.go b/pkg/controllers/sync/dispatch/operation.go index aed6d8a1..5109fca7 100644 --- a/pkg/controllers/sync/dispatch/operation.go +++ b/pkg/controllers/sync/dispatch/operation.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/dispatch/retain.go b/pkg/controllers/sync/dispatch/retain.go index ef13755c..6c3004f7 100644 --- a/pkg/controllers/sync/dispatch/retain.go +++ b/pkg/controllers/sync/dispatch/retain.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/dispatch/unmanaged.go b/pkg/controllers/sync/dispatch/unmanaged.go index d25c557b..75a4700b 100644 --- a/pkg/controllers/sync/dispatch/unmanaged.go +++ b/pkg/controllers/sync/dispatch/unmanaged.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/history.go b/pkg/controllers/sync/history.go index 7d9da402..167fc1ac 100644 --- a/pkg/controllers/sync/history.go +++ b/pkg/controllers/sync/history.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/sync/placement.go b/pkg/controllers/sync/placement.go index 3fa84c7a..ad85b233 100644 --- a/pkg/controllers/sync/placement.go +++ b/pkg/controllers/sync/placement.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 1cd2e6fd..092d4634 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/status/status.go b/pkg/controllers/sync/status/status.go index ad09c27b..ae082df7 100644 --- a/pkg/controllers/sync/status/status.go +++ b/pkg/controllers/sync/status/status.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/version/adapter.go b/pkg/controllers/sync/version/adapter.go index 9ac5aaa5..65bcf84e 100644 --- a/pkg/controllers/sync/version/adapter.go +++ b/pkg/controllers/sync/version/adapter.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/sync/version/cluster.go b/pkg/controllers/sync/version/cluster.go index fe45cc3a..3d78843d 100644 --- a/pkg/controllers/sync/version/cluster.go +++ b/pkg/controllers/sync/version/cluster.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index 733116b6..59468f7d 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/sync/version/namespaced.go b/pkg/controllers/sync/version/namespaced.go index 8a0d437c..3047c2d4 100644 --- a/pkg/controllers/sync/version/namespaced.go +++ b/pkg/controllers/sync/version/namespaced.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. 
diff --git a/pkg/controllers/util/adoptedannotation.go b/pkg/controllers/util/adoptedannotation.go index 63060b74..aa9855b1 100644 --- a/pkg/controllers/util/adoptedannotation.go +++ b/pkg/controllers/util/adoptedannotation.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/cascadingdeleteannotation.go b/pkg/controllers/util/cascadingdeleteannotation.go index ce99c99b..d91c371f 100644 --- a/pkg/controllers/util/cascadingdeleteannotation.go +++ b/pkg/controllers/util/cascadingdeleteannotation.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/clusterselector/util.go b/pkg/controllers/util/clusterselector/util.go index 5f3e2f2a..fa6a7bcf 100644 --- a/pkg/controllers/util/clusterselector/util.go +++ b/pkg/controllers/util/clusterselector/util.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/clusterutil.go b/pkg/controllers/util/clusterutil.go index 6b7e1236..24c6940d 100644 --- a/pkg/controllers/util/clusterutil.go +++ b/pkg/controllers/util/clusterutil.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2016 The Kubernetes Authors. diff --git a/pkg/controllers/util/conflictresolutionannotation.go b/pkg/controllers/util/conflictresolutionannotation.go index e726ac5e..522f4f16 100644 --- a/pkg/controllers/util/conflictresolutionannotation.go +++ b/pkg/controllers/util/conflictresolutionannotation.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/controllerconfig.go b/pkg/controllers/util/controllerconfig.go index 88fea409..c13f92fb 100644 --- a/pkg/controllers/util/controllerconfig.go +++ b/pkg/controllers/util/controllerconfig.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/util/federatedclient/client.go b/pkg/controllers/util/federatedclient/client.go index 1ca63afc..7754f40a 100644 --- a/pkg/controllers/util/federatedclient/client.go +++ b/pkg/controllers/util/federatedclient/client.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/federatedclient/interface.go b/pkg/controllers/util/federatedclient/interface.go index 446c3608..9c979096 100644 --- a/pkg/controllers/util/federatedclient/interface.go +++ b/pkg/controllers/util/federatedclient/interface.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/federatedclient/podinformer.go b/pkg/controllers/util/federatedclient/podinformer.go index 782bfaba..b6fc61a1 100644 --- a/pkg/controllers/util/federatedclient/podinformer.go +++ b/pkg/controllers/util/federatedclient/podinformer.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/federatedinformer.go b/pkg/controllers/util/federatedinformer.go index a57ce03b..90cb7fc8 100644 --- a/pkg/controllers/util/federatedinformer.go +++ b/pkg/controllers/util/federatedinformer.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2016 The Kubernetes Authors. diff --git a/pkg/controllers/util/federatedstatus.go b/pkg/controllers/util/federatedstatus.go index 87d1a911..824cd70c 100644 --- a/pkg/controllers/util/federatedstatus.go +++ b/pkg/controllers/util/federatedstatus.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. 
diff --git a/pkg/controllers/util/genericinformer.go b/pkg/controllers/util/genericinformer.go index d46d8cbe..74621164 100644 --- a/pkg/controllers/util/genericinformer.go +++ b/pkg/controllers/util/genericinformer.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/util/hash/hash.go b/pkg/controllers/util/hash/hash.go index 2d239839..94a81902 100644 --- a/pkg/controllers/util/hash/hash.go +++ b/pkg/controllers/util/hash/hash.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2015 The Kubernetes Authors. diff --git a/pkg/controllers/util/history/controller_history.go b/pkg/controllers/util/history/controller_history.go index 463d9a46..d217c6d5 100644 --- a/pkg/controllers/util/history/controller_history.go +++ b/pkg/controllers/util/history/controller_history.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/meta.go b/pkg/controllers/util/meta.go index eccfa583..a2feb5bd 100644 --- a/pkg/controllers/util/meta.go +++ b/pkg/controllers/util/meta.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2016 The Kubernetes Authors. diff --git a/pkg/controllers/util/orphaningannotation.go b/pkg/controllers/util/orphaningannotation.go index 974aa5fd..6c915506 100644 --- a/pkg/controllers/util/orphaningannotation.go +++ b/pkg/controllers/util/orphaningannotation.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/util/overrides.go b/pkg/controllers/util/overrides.go index 383fb3cf..dcf65626 100644 --- a/pkg/controllers/util/overrides.go +++ b/pkg/controllers/util/overrides.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/util/planner/planner.go b/pkg/controllers/util/planner/planner.go index 46f91c3f..d4034bcd 100644 --- a/pkg/controllers/util/planner/planner.go +++ b/pkg/controllers/util/planner/planner.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2016 The Kubernetes Authors. diff --git a/pkg/controllers/util/propagatedversion.go b/pkg/controllers/util/propagatedversion.go index 4df3d4c6..71a1d095 100644 --- a/pkg/controllers/util/propagatedversion.go +++ b/pkg/controllers/util/propagatedversion.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/util/propagationstatus/propagationstatus.go b/pkg/controllers/util/propagationstatus/propagationstatus.go index f2d09223..acb0b016 100644 --- a/pkg/controllers/util/propagationstatus/propagationstatus.go +++ b/pkg/controllers/util/propagationstatus/propagationstatus.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/resourceclient.go b/pkg/controllers/util/resourceclient.go index 4df9ad2b..0e595de3 100644 --- a/pkg/controllers/util/resourceclient.go +++ b/pkg/controllers/util/resourceclient.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/util/resourceinformer.go b/pkg/controllers/util/resourceinformer.go index 427973ec..8159c58a 100644 --- a/pkg/controllers/util/resourceinformer.go +++ b/pkg/controllers/util/resourceinformer.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2018 The Kubernetes Authors. 
diff --git a/pkg/controllers/util/rolloutplan.go b/pkg/controllers/util/rolloutplan.go index 7f777770..e6bd057f 100644 --- a/pkg/controllers/util/rolloutplan.go +++ b/pkg/controllers/util/rolloutplan.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/schema/apiresource.go b/pkg/controllers/util/schema/apiresource.go index 010a8426..b5361f70 100644 --- a/pkg/controllers/util/schema/apiresource.go +++ b/pkg/controllers/util/schema/apiresource.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/schema/gvk.go b/pkg/controllers/util/schema/gvk.go index 8c075f44..9abf7b25 100644 --- a/pkg/controllers/util/schema/gvk.go +++ b/pkg/controllers/util/schema/gvk.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/sourcefeedback/scheduling.go b/pkg/controllers/util/sourcefeedback/scheduling.go deleted file mode 100644 index 85e6f49a..00000000 --- a/pkg/controllers/util/sourcefeedback/scheduling.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sourcefeedback - -import ( - "sort" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/util/meta" -) - -var SchedulingAnnotation = common.DefaultPrefix + "scheduling" - -type Scheduling struct { - // Generation is the generation of the source object - // observed in the federated object when this placement is sampled. - // This value should not be null unless in the condition - // where the federated object is manually created by another controller. - Generation *int64 `json:"generation"` - - // FederatedGeneration is the generation of the federated object - // observed when this placement is sampled. - FederatedGeneration int64 `json:"fedGeneration"` - - // Placement contains a list of FederatedCluster object names. 
- Placement []string `json:"placement,omitempty"` -} - -func PopulateSchedulingAnnotation(sourceObject *unstructured.Unstructured, fedObject *fedcorev1a1.GenericFederatedObject, hasChanged *bool) (err error) { - scheduling := Scheduling{} - - srcMeta, err := meta.GetSourceObjectMeta(fedObject) - if err != nil { - return err - } - - scheduling.Generation = pointer.Int64(srcMeta.GetGeneration()) - scheduling.FederatedGeneration = fedObject.GetGeneration() - - clusterNames := fedObject.GetPlacementUnion() - if len(clusterNames) > 0 { - for clusterName := range clusterNames { - scheduling.Placement = append(scheduling.Placement, clusterName) - } - sort.Strings(scheduling.Placement) - } - - setAnnotation(sourceObject, SchedulingAnnotation, &scheduling, hasChanged) - - return nil -} diff --git a/pkg/controllers/util/sourcefeedback/status.go b/pkg/controllers/util/sourcefeedback/status.go deleted file mode 100644 index 9153d5c7..00000000 --- a/pkg/controllers/util/sourcefeedback/status.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sourcefeedback - -import ( - "sort" - "strconv" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/klog/v2" - - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -) - -var StatusAnnotation = common.DefaultPrefix + "status" - -// Status is JSON-serialized into the status annotation of source objects -// that enabled status aggregator. -type Status struct { - Clusters []StatusCluster `json:"clusters"` -} - -type StatusCluster struct { - // Name is the name of the cluster that this entry describes. - Name string `json:"name"` - - // Generation is the generation of the source object that got dispatched to the member cluster. - // Zero if the target object has no source-generation annotation. 
- Generation int64 `json:"generation"` -} - -func PopulateStatusAnnotation(sourceObj *unstructured.Unstructured, clusterObjs map[string]interface{}, hasChanged *bool) { - status := Status{ - Clusters: make([]StatusCluster, 0, len(clusterObjs)), - } - - for cluster, obj := range clusterObjs { - obj := obj.(metav1.Object) - generationString := obj.GetAnnotations()[common.SourceGenerationAnnotation] - generation, err := strconv.ParseInt(generationString, 10, 64) - if err != nil { - klog.Errorf("invalid generation string %q: %v", generationString, err) - generation = 0 - } - - status.Clusters = append(status.Clusters, StatusCluster{ - Name: cluster, - Generation: generation, - }) - } - - sort.Slice(status.Clusters, func(i, j int) bool { - return status.Clusters[i].Name < status.Clusters[j].Name - }) - - setAnnotation(sourceObj, StatusAnnotation, &status, hasChanged) -} diff --git a/pkg/controllers/util/sourcefeedback/syncing.go b/pkg/controllers/util/sourcefeedback/syncing.go deleted file mode 100644 index 25a7d269..00000000 --- a/pkg/controllers/util/sourcefeedback/syncing.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sourcefeedback - -import ( - "sort" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -) - -const SyncingAnnotation = common.DefaultPrefix + "syncing" - -type Syncing struct { - // Generation is the generation of the source object - // observed in the federated object during this sync operation. - // This value should not be null unless in the condition - // where the federated object is manually created by another controller. - Generation *int64 `json:"generation"` - - // FederatedGeneration is the generation of the federated object - // observed during this sync operation. - FederatedGeneration int64 `json:"fedGeneration"` - - Clusters []SyncingCluster `json:"clusters"` -} - -type SyncingCluster struct { - // Name is the name of the cluster that this entry describes. - Name string `json:"name"` - // Status is the cluster propagation status string. 
- Status fedtypesv1a1.PropagationStatus `json:"status"` -} - -func PopulateSyncingAnnotation( - fedObject *unstructured.Unstructured, - clusterStatusMap map[string]fedtypesv1a1.PropagationStatus, - hasChanged *bool, -) (err error) { - syncing := Syncing{} - - generation, exists, err := unstructured.NestedInt64( - fedObject.Object, - common.SpecField, - common.TemplateField, - common.MetadataField, - common.GenerationField, - ) - if err != nil { - return err - } - if exists { - generation := generation - syncing.Generation = &generation - } - - syncing.FederatedGeneration = fedObject.GetGeneration() - - syncing.Clusters = make([]SyncingCluster, 0, len(clusterStatusMap)) - - for clusterName, clusterStatus := range clusterStatusMap { - syncing.Clusters = append(syncing.Clusters, SyncingCluster{ - Name: clusterName, - Status: clusterStatus, - }) - } - - sort.Slice(syncing.Clusters, func(i, j int) bool { - return syncing.Clusters[i].Name < syncing.Clusters[j].Name - }) - - setAnnotation(fedObject, SyncingAnnotation, &syncing, hasChanged) - - return nil -} diff --git a/pkg/controllers/util/sourcefeedback/util.go b/pkg/controllers/util/sourcefeedback/util.go deleted file mode 100644 index aeae5af8..00000000 --- a/pkg/controllers/util/sourcefeedback/util.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sourcefeedback - -import ( - "encoding/json" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2" - - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" -) - -func setAnnotation(object metav1.Object, key string, value interface{}, hasChanged *bool) { - jsonBuf, err := json.Marshal(value) - if err != nil { - klog.Errorf("Cannot marshal JSON: %v", err) - } - - localHasChanged, err := annotation.AddAnnotation(object, key, string(jsonBuf)) - if err != nil { - klog.Errorf("Cannot add %q annotation: %v", key, err) - } - - *hasChanged = *hasChanged || localHasChanged -} diff --git a/pkg/controllers/util/store.go b/pkg/controllers/util/store.go index df613e46..7cd149a1 100644 --- a/pkg/controllers/util/store.go +++ b/pkg/controllers/util/store.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/unstructured/unstructured.go b/pkg/controllers/util/unstructured/unstructured.go index 3c2f3f03..f1ab977f 100644 --- a/pkg/controllers/util/unstructured/unstructured.go +++ b/pkg/controllers/util/unstructured/unstructured.go @@ -1,3 +1,4 @@ +//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
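The sourcefeedback helpers deleted above all shared one wire format: a small struct JSON-marshalled into a single annotation on the source or federated object. A sketch of the payload the removed Syncing type produced, reconstructed from the struct tags shown in the deleted code; the cluster name and status value are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the deleted Syncing/SyncingCluster shapes, kept here only
// to show the annotation payload; fields and tags mirror the code above.
type syncingCluster struct {
	Name   string `json:"name"`
	Status string `json:"status"`
}

type syncing struct {
	Generation          *int64           `json:"generation"`
	FederatedGeneration int64            `json:"fedGeneration"`
	Clusters            []syncingCluster `json:"clusters"`
}

func main() {
	gen := int64(3)
	payload := syncing{
		Generation:          &gen,
		FederatedGeneration: 7,
		Clusters:            []syncingCluster{{Name: "member-1", Status: "OK"}},
	}
	raw, _ := json.Marshal(&payload)
	fmt.Println(string(raw))
	// {"generation":3,"fedGeneration":7,"clusters":[{"name":"member-1","status":"OK"}]}
}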
diff --git a/pkg/controllers/util/eventsink/eventsink.go b/pkg/util/eventsink/eventsink.go similarity index 100% rename from pkg/controllers/util/eventsink/eventsink.go rename to pkg/util/eventsink/eventsink.go diff --git a/pkg/controllers/util/eventsink/eventsink_test.go b/pkg/util/eventsink/eventsink_test.go similarity index 100% rename from pkg/controllers/util/eventsink/eventsink_test.go rename to pkg/util/eventsink/eventsink_test.go diff --git a/pkg/controllers/util/finalizers/finalizers.go b/pkg/util/finalizers/finalizers.go similarity index 100% rename from pkg/controllers/util/finalizers/finalizers.go rename to pkg/util/finalizers/finalizers.go diff --git a/pkg/controllers/util/finalizers/finalizers_test.go b/pkg/util/finalizers/finalizers_test.go similarity index 100% rename from pkg/controllers/util/finalizers/finalizers_test.go rename to pkg/util/finalizers/finalizers_test.go From 3005994a50307366fd321643f97e48ee36b4fe1b Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 18:04:05 +0800 Subject: [PATCH 063/173] revert annotation files --- pkg/controllers/federate/util.go | 10 +- pkg/controllers/sync/constant.go | 98 ------------------- pkg/controllers/util/adoptedannotation.go | 1 - .../util/conflictresolutionannotation.go | 1 - pkg/controllers/util/orphaningannotation.go | 1 - 5 files changed, 5 insertions(+), 106 deletions(-) delete mode 100644 pkg/controllers/sync/constant.go diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 0076e2ce..e682c151 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -32,7 +32,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/sync" + "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" @@ -245,8 +245,8 @@ var ( scheduler.SchedulingModeAnnotation, scheduler.StickyClusterAnnotation, nsautoprop.NoAutoPropagationAnnotation, - sync.OrphanManagedResourcesAnnotation, - sync.ConflictResolutionAnnotation, + util.OrphanManagedResourcesAnnotation, + util.ConflictResolutionAnnotation, scheduler.TolerationsAnnotations, scheduler.PlacementsAnnotations, scheduler.ClusterSelectorAnnotations, @@ -261,8 +261,8 @@ var ( // TODO: Do we need to specify the internal annotations here? // List of annotations that should be ignored on the source object ignoredAnnotationSet = sets.New( - sync.ConflictResolutionInternalAnnotation, - sync.OrphanManagedResourcesInternalAnnotation, + util.ConflictResolutionInternalAnnotation, + util.OrphanManagedResourcesInternalAnnotation, common.EnableFollowerSchedulingAnnotation, ) diff --git a/pkg/controllers/sync/constant.go b/pkg/controllers/sync/constant.go deleted file mode 100644 index 6dfd1765..00000000 --- a/pkg/controllers/sync/constant.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. -*/ - -package sync - -import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -) - -type ConflictResolution string - -type OrphanManagedResourcesBehavior string - -const ( - ConflictResolutionAnnotation = common.DefaultPrefix + "conflict-resolution" - ConflictResolutionInternalAnnotation = common.InternalPrefix + "conflict-resolution" - AdoptedAnnotation = common.DefaultPrefix + "adopted" - // If this annotation is present on a federated resource, it controls the - // manner in which resources in the member clusters are orphaned when the - // federated resource is deleted. - // If the annotation is not present (the default), resources in member - // clusters will be deleted before the federated resource is deleted. - OrphanManagedResourcesAnnotation = common.DefaultPrefix + "orphan" - OrphanManagedResourcesInternalAnnotation = common.InternalPrefix + "orphan" - - // Conflict resolution for preexisting resources - ConflictResolutionAdopt ConflictResolution = "adopt" - - // Orphan all managed resources - OrphanManagedResourcesAll OrphanManagedResourcesBehavior = "all" - // Orphan only the adopted resources - OrphanManagedResourcesAdopted OrphanManagedResourcesBehavior = "adopted" - // Orphaning disabled, delete managed resources - OrphanManagedResourcesNone OrphanManagedResourcesBehavior = "" -) - -func ShouldAdoptPreexistingResources(obj *unstructured.Unstructured) bool { - annotations := obj.GetAnnotations() - - value, exists := annotations[ConflictResolutionInternalAnnotation] - if !exists { - value = annotations[ConflictResolutionAnnotation] - } - - return value == string(ConflictResolutionAdopt) -} - -func HasAdoptedAnnotation(obj *unstructured.Unstructured) bool { - annotations := obj.GetAnnotations() - if annotations == nil { - return false - } - return annotations[AdoptedAnnotation] == common.AnnotationValueTrue -} - -func RemoveAdoptedAnnotation(obj *unstructured.Unstructured) { - annotations := obj.GetAnnotations() - if annotations == nil || annotations[AdoptedAnnotation] != common.AnnotationValueTrue { - return - } - delete(annotations, AdoptedAnnotation) - obj.SetAnnotations(annotations) -} - -func GetOrphaningBehavior(obj *unstructured.Unstructured) OrphanManagedResourcesBehavior { - annotations := obj.GetAnnotations() - - value, exists := annotations[OrphanManagedResourcesInternalAnnotation] - if !exists { - value = annotations[OrphanManagedResourcesAnnotation] - } - - switch value { - case string(OrphanManagedResourcesAll), string(OrphanManagedResourcesAdopted): - return (OrphanManagedResourcesBehavior)(value) - default: - return OrphanManagedResourcesNone - } -} diff --git a/pkg/controllers/util/adoptedannotation.go b/pkg/controllers/util/adoptedannotation.go index aa9855b1..63060b74 100644 --- a/pkg/controllers/util/adoptedannotation.go +++ b/pkg/controllers/util/adoptedannotation.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
diff --git a/pkg/controllers/util/conflictresolutionannotation.go b/pkg/controllers/util/conflictresolutionannotation.go index 522f4f16..e726ac5e 100644 --- a/pkg/controllers/util/conflictresolutionannotation.go +++ b/pkg/controllers/util/conflictresolutionannotation.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/orphaningannotation.go b/pkg/controllers/util/orphaningannotation.go index 6c915506..974aa5fd 100644 --- a/pkg/controllers/util/orphaningannotation.go +++ b/pkg/controllers/util/orphaningannotation.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. From d9107de36a7259b149d24208f05338e0c368f751 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 18:18:17 +0800 Subject: [PATCH 064/173] fix lint errors --- pkg/controllers/federate/controller.go | 6 +++--- pkg/controllers/federate/util.go | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 602b4593..a5ee19b2 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -39,11 +39,11 @@ import ( fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" - finalizersutil "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/stats" "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + finalizersutil "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" "github.com/kubewharf/kubeadmiral/pkg/util/logging" "github.com/kubewharf/kubeadmiral/pkg/util/meta" @@ -496,7 +496,7 @@ func (c *FederateController) handleExistingFederatedObject( logger := klog.FromContext(ctx) logger.V(3).Info("Checking if federated object needs update") - needsUpdate, err := updateFederatedObjectForSourceObject(ftc,sourceObject, fedObject) + needsUpdate, err := updateFederatedObjectForSourceObject(ftc, sourceObject, fedObject) if err != nil { return false, fmt.Errorf("failed to check if federated object needs update: %w", err) } diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index e682c151..7d960ae7 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -32,11 +32,10 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" + "github.com/kubewharf/kubeadmiral/pkg/controllers/util" annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/util/naming" "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" From 2b333bf24e53ec1df761288d1e40b10bd763aa81 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 18 Jul 2023 18:31:10 +0800 Subject: [PATCH 065/173] fix lint errors --- cmd/controller-manager/app/util.go | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/controller-manager/app/util.go b/cmd/controller-manager/app/util.go index 6ad431d0..87bab5ef 100644 --- a/cmd/controller-manager/app/util.go +++ b/cmd/controller-manager/app/util.go @@ -29,14 +29,14 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/cmd/controller-manager/app/options" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" - clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" "github.com/kubewharf/kubeadmiral/pkg/stats" + clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" ) @@ -154,7 +154,7 @@ func createControllerContext(opts *options.Options) (*controllercontext.Context, DynamicInformerFactory: dynamicInformerFactory, FedInformerFactory: fedInformerFactory, - InformerManager: informerManager, + InformerManager: informerManager, FederatedInformerManager: federatedInformerManager, }, nil } From 2f5350c1f1310c0fe624e2fd391b793f750dc2ce Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 19 Jul 2023 09:10:36 +0800 Subject: [PATCH 066/173] filter kube-admiral-system namespace objects --- pkg/controllers/federate/controller.go | 39 ++++++++++++++++++-------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index a5ee19b2..3cb272b1 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -74,6 +74,8 @@ type FederateController struct { fedObjectInformer fedcorev1a1informers.FederatedObjectInformer clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer + fedSystemNamespace string + fedClient fedclient.Interface dynamicClient dynamic.Interface @@ -103,8 +105,11 @@ func NewFederateController( informerManager: informerManager, fedObjectInformer: fedObjectInformer, clusterFedObjectInformer: clusterFedObjectInformer, + fedSystemNamespace: fedSystemNamespace, fedClient: fedClient, dynamicClient: dynamicClient, + worker: nil, + eventRecorder: nil, metrics: metrics, logger: klog.Background().WithValues("controller", FederateControllerName), } @@ -122,21 +127,31 @@ func NewFederateController( if err := informerManager.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{ Predicate: informermanager.RegisterOncePredicate, Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { - return eventhandlers.NewTriggerOnAllChanges(func(obj runtime.Object) { - uns := obj.(*unstructured.Unstructured) - c.worker.Enqueue(workerKey{ - name: uns.GetName(), - namespace: uns.GetNamespace(), - gvk: ftc.GetSourceTypeGVK(), - }) - }) + return cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + uns := obj.(*unstructured.Unstructured) + return uns.GetNamespace() != fedSystemNamespace + }, + Handler: eventhandlers.NewTriggerOnAllChanges(func(obj runtime.Object) { + uns := obj.(*unstructured.Unstructured) + c.worker.Enqueue(workerKey{ + name: uns.GetName(), + namespace: uns.GetNamespace(), + gvk: 
ftc.GetSourceTypeGVK(), + }) + }), + } }, }); err != nil { return nil, err } - if _, err := fedObjectInformer.Informer().AddEventHandler( - eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { + if _, err := fedObjectInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + fedObj := obj.(*fedcorev1a1.FederatedObject) + return fedObj.Namespace != fedSystemNamespace + }, + Handler: eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { fedObj := o.(*fedcorev1a1.FederatedObject) logger := c.logger.WithValues("federated-object", common.NewQualifiedName(fedObj)) @@ -154,7 +169,7 @@ func NewFederateController( gvk: gvk, }) }), - ); err != nil { + }); err != nil { return nil, err } @@ -185,7 +200,7 @@ func NewFederateController( } func (c *FederateController) Run(ctx context.Context) { - ctx, logger := logging.InjectLoggerValues(ctx, "controller", FederateControllerName) + ctx, logger := logging.InjectLogger(ctx, c.logger) logger.Info("Starting controller") defer logger.Info("Stopping controller") From 009a469f6f43fb989cd659cfe71a67cb9a16025e Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 19 Jul 2023 09:11:00 +0800 Subject: [PATCH 067/173] add more tests for naming --- pkg/util/naming/naming_test.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pkg/util/naming/naming_test.go b/pkg/util/naming/naming_test.go index ff6cd244..13b0060e 100644 --- a/pkg/util/naming/naming_test.go +++ b/pkg/util/naming/naming_test.go @@ -49,6 +49,14 @@ func TestGenerateFederatedObjectName(t *testing.T) { }, want: "system.foo-roles.rbac.authorization.k8s.io-2728495308", }, + { + name: "generate federated object name with consecutive :", + args: args{ + objectName: "system::foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-2999937238", + }, { name: "generate federated object name with $", args: args{ @@ -57,6 +65,14 @@ func TestGenerateFederatedObjectName(t *testing.T) { }, want: "system.foo-roles.rbac.authorization.k8s.io-4258037882", }, + { + name: "generate federated object name with consecutive $", + args: args{ + objectName: "system$foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-4258037882", + }, { name: "generate federated object name with %", args: args{ @@ -65,6 +81,14 @@ func TestGenerateFederatedObjectName(t *testing.T) { }, want: "system.foo-roles.rbac.authorization.k8s.io-1244789457", }, + { + name: "generate federated object name with consecutive %", + args: args{ + objectName: "system%%%foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-4069727015", + }, { name: "generate federated object name with #", args: args{ @@ -73,6 +97,14 @@ func TestGenerateFederatedObjectName(t *testing.T) { }, want: "system.foo-roles.rbac.authorization.k8s.io-1128546011", }, + { + name: "generate federated object name with consecutive #", + args: args{ + objectName: "system####foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-3227827662", + }, { name: "generate federated object name with upper case letter", args: args{ From 1153d666d0e1b9246e20828d33c00e320fbba325 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 19 Jul 2023 09:12:08 +0800 Subject: [PATCH 068/173] restore genericclient --- pkg/client/generic/genericclient.go | 273 ++++++++++++++++++++++++++++ 1 file 
changed, 273 insertions(+) create mode 100644 pkg/client/generic/genericclient.go diff --git a/pkg/client/generic/genericclient.go b/pkg/client/generic/genericclient.go new file mode 100644 index 00000000..f22ba80f --- /dev/null +++ b/pkg/client/generic/genericclient.go @@ -0,0 +1,273 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This file may have been modified by The KubeAdmiral Authors +("KubeAdmiral Modifications"). All KubeAdmiral Modifications +are Copyright 2023 The KubeAdmiral Authors. +*/ + +package generic + +import ( + "context" + "fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kubewharf/kubeadmiral/pkg/client/generic/scheme" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + "github.com/kubewharf/kubeadmiral/pkg/controllers/util/history" +) + +type Client interface { + Create(ctx context.Context, obj client.Object) error + Get(ctx context.Context, obj client.Object, namespace, name string) error + Update(ctx context.Context, obj client.Object) error + Delete(ctx context.Context, obj client.Object, namespace, name string, opts ...client.DeleteOption) error + List(ctx context.Context, obj client.ObjectList, namespace string) error + UpdateStatus(ctx context.Context, obj client.Object) error + Patch(ctx context.Context, obj client.Object, patch client.Patch) error + Rollback(ctx context.Context, obj client.Object, toRevision int64) error + DeleteHistory(ctx context.Context, obj client.Object) error + + ListWithOptions(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error +} + +type genericClient struct { + client client.Client +} + +func New(config *rest.Config) (Client, error) { + client, err := client.New(config, client.Options{Scheme: scheme.Scheme}) + if err != nil { + return nil, err + } + return &genericClient{client}, err +} + +func NewForConfigOrDie(config *rest.Config) Client { + client, err := New(config) + if err != nil { + panic(err) + } + return client +} + +func NewForConfigOrDieWithUserAgent(config *rest.Config, userAgent string) Client { + configCopy := rest.CopyConfig(config) + rest.AddUserAgent(configCopy, userAgent) + return NewForConfigOrDie(configCopy) +} + +func (c *genericClient) Create(ctx context.Context, obj client.Object) error { + return c.client.Create(ctx, obj) +} + +func (c *genericClient) Get(ctx context.Context, obj client.Object, namespace, name string) error { + return c.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, obj) +} + +func (c *genericClient) Update(ctx context.Context, obj client.Object) error { + return c.client.Update(ctx, obj) +} + +func (c *genericClient) Delete(ctx context.Context, obj client.Object, namespace, name 
string, opts ...client.DeleteOption) error {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	if accessor == nil {
+		return fmt.Errorf("nil accessor for generic client")
+	}
+	accessor.SetNamespace(namespace)
+	accessor.SetName(name)
+	return c.client.Delete(ctx, obj, opts...)
+}
+
+func (c *genericClient) List(ctx context.Context, obj client.ObjectList, namespace string) error {
+	return c.client.List(ctx, obj, client.InNamespace(namespace))
+}
+
+func (c *genericClient) ListWithOptions(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error {
+	return c.client.List(ctx, obj, opts...)
+}
+
+func (c *genericClient) UpdateStatus(ctx context.Context, obj client.Object) error {
+	return c.client.Status().Update(ctx, obj)
+}
+
+func (c *genericClient) Patch(ctx context.Context, obj client.Object, patch client.Patch) error {
+	return c.client.Patch(ctx, obj, patch)
+}
+
+// Rollback rolls back a federated object such as FederatedDeployment to the given revision.
+func (c *genericClient) Rollback(ctx context.Context, obj client.Object, toRevision int64) error {
+	if toRevision < 0 {
+		return fmt.Errorf("unable to find specified revision %v in history", toRevision)
+	}
+	if toRevision == 0 {
+		// try to get last revision from annotations, fallback to list all revisions on error
+		if err := c.rollbackToLastRevision(ctx, obj); err == nil {
+			return nil
+		}
+	}
+
+	history, err := c.controlledHistory(ctx, obj)
+	if err != nil {
+		return fmt.Errorf("failed to list history: %s", err)
+	}
+	if toRevision == 0 && len(history) <= 1 {
+		return fmt.Errorf("no last revision to roll back to")
+	}
+
+	toHistory := findHistory(toRevision, history)
+	if toHistory == nil {
+		return fmt.Errorf("unable to find specified revision %v in history", toRevision)
+	}
+
+	// Restore revision
+	if err := c.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, toHistory.Data.Raw)); err != nil {
+		return fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
+	}
+	return nil
+}
+
+func (c *genericClient) rollbackToLastRevision(ctx context.Context, obj client.Object) error {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	lastRevisionNameWithHash := accessor.GetAnnotations()[common.LastRevisionAnnotation]
+	if len(lastRevisionNameWithHash) == 0 {
+		return fmt.Errorf("annotation: %s not found", common.LastRevisionAnnotation)
+	}
+
+	lastRevisionName, err := c.checkLastRevisionNameWithHash(lastRevisionNameWithHash, obj)
+	if err != nil {
+		return fmt.Errorf("failed to check last revision name, err: %v", err)
+	}
+
+	latestRevision := &appsv1.ControllerRevision{}
+	if err := c.Get(ctx, latestRevision, accessor.GetNamespace(), lastRevisionName); err != nil {
+		return err
+	}
+
+	// restore latest revision
+	if err := c.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, latestRevision.Data.Raw)); err != nil {
+		return fmt.Errorf("failed restoring latest revision: %v", err)
+	}
+	return nil
+}
+
+func (c *genericClient) checkLastRevisionNameWithHash(lastRevisionNameWithHash string, obj client.Object) (string, error) {
+	parts := strings.Split(lastRevisionNameWithHash, "|")
+	if len(parts) != 2 {
+		return "", fmt.Errorf("invalid lastRevisionNameWithHash: %s", lastRevisionNameWithHash)
+	}
+	lastRevisionName, hash := parts[0], parts[1]
+
+	utdObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+	if err != nil {
+		return "", err
+	}
+
+	template, ok, err := unstructured.NestedMap(utdObj, "spec", "template", "spec", "template")
+	if err != nil {
+		return "", err
+	}
+	if !ok {
+		return "", fmt.Errorf("spec.template.spec.template is not found, fedResource: %+v", obj)
+	}
+
+	if templateHash := history.HashObject(template); templateHash != hash {
+		return "", fmt.Errorf("pod template hash: %s, last revision name suffix: %s, they should be equal", templateHash, hash)
+	}
+	return lastRevisionName, nil
+}
+
+// controlledHistory returns all ControllerRevisions in the namespace that are selected by the selector and owned by the accessor.
+func (c *genericClient) controlledHistory(ctx context.Context, obj client.Object) ([]*appsv1.ControllerRevision, error) {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error())
+	}
+	selector := labels.SelectorFromSet(labels.Set{
+		"uid": string(accessor.GetUID()),
+	})
+
+	opt1 := client.InNamespace(accessor.GetNamespace())
+	opt2 := client.MatchingLabelsSelector{Selector: selector}
+	historyList := &appsv1.ControllerRevisionList{}
+	if err := c.ListWithOptions(ctx, historyList, opt1, opt2); err != nil {
+		return nil, err
+	}
+
+	var result []*appsv1.ControllerRevision
+	for i := range historyList.Items {
+		history := historyList.Items[i]
+		// Only add history that belongs to the API object
+		if metav1.IsControlledBy(&history, accessor) {
+			result = append(result, &history)
+		}
+	}
+	return result, nil
+}
+
+func (c *genericClient) DeleteHistory(ctx context.Context, obj client.Object) error {
+	historyList, err := c.controlledHistory(ctx, obj)
+	if err != nil {
+		return err
+	}
+	for _, history := range historyList {
+		if err := c.Delete(ctx, history, history.Namespace, history.Name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// findHistory returns a ControllerRevision of a specific revision from the given ControllerRevisions.
+// It returns nil if no such ControllerRevision exists.
+// If toRevision is 0, the last previously used history is returned.
+func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision {
+	if toRevision == 0 && len(allHistory) <= 1 {
+		return nil
+	}
+
+	// Find the history to rollback to
+	var toHistory *appsv1.ControllerRevision
+	if toRevision == 0 {
+		// If toRevision == 0, find the latest revision (2nd max)
+		history.SortControllerRevisions(allHistory)
+		toHistory = allHistory[len(allHistory)-2]
+	} else {
+		for _, h := range allHistory {
+			if h.Revision == toRevision {
+				// If toRevision != 0, find the history with matching revision
+				return h
+			}
+		}
+	}
+	return toHistory
+}
From 32917e29f9905b683d6320a2adf2d3a8334865d6 Mon Sep 17 00:00:00 2001
From: "hawjia.lim"
Date: Wed, 19 Jul 2023 13:26:30 +0800
Subject: [PATCH 069/173] set transformed on name squash

---
 pkg/util/naming/naming.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pkg/util/naming/naming.go b/pkg/util/naming/naming.go
index 6f32adc5..a8ecbb0f 100644
--- a/pkg/util/naming/naming.go
+++ b/pkg/util/naming/naming.go
@@ -90,6 +90,7 @@ func transformObjectName(objectName string) (string, bool) {
 		}
 
 		transformed = true
+
 		if ch >= 'A' && ch <= 'Z' {
 			// transform uppercase letters into lowercase
 			transformedName[i] = caseDiff + ch
@@ -103,6 +104,7 @@
 	santizedName := []byte{}
 	for i, ch := range transformedName {
 		if i != 0 && transformedName[i-1] == '.' && transformedName[i] == '.'
{ + transformed = true continue } santizedName = append(santizedName, ch) From 7bc07300a27cac3623467f98263ab22b29ae742f Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 19 Jul 2023 14:01:49 +0800 Subject: [PATCH 070/173] fix typo --- pkg/util/naming/naming.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/util/naming/naming.go b/pkg/util/naming/naming.go index a8ecbb0f..864b429f 100644 --- a/pkg/util/naming/naming.go +++ b/pkg/util/naming/naming.go @@ -101,16 +101,16 @@ func transformObjectName(objectName string) (string, bool) { } // squash any sequence of more than one '.' - santizedName := []byte{} + sanitizedName := []byte{} for i, ch := range transformedName { if i != 0 && transformedName[i-1] == '.' && transformedName[i] == '.' { transformed = true continue } - santizedName = append(santizedName, ch) + sanitizedName = append(sanitizedName, ch) } - return string(santizedName), transformed + return string(sanitizedName), transformed } func fnvHashFunc(key string) uint32 { From da0f2865748c9c5e80ed3ac132f88c0b6483717b Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 19 Jul 2023 18:42:57 +0800 Subject: [PATCH 071/173] fix issues --- cmd/controller-manager/app/core.go | 2 + pkg/controllers/federate/controller.go | 62 ++++++-- pkg/controllers/federate/util.go | 31 ++-- pkg/controllers/util/clusterutil.go | 206 ------------------------- pkg/util/meta/federatedobject.go | 38 ----- pkg/util/naming/naming_test.go | 8 + 6 files changed, 70 insertions(+), 277 deletions(-) delete mode 100644 pkg/controllers/util/clusterutil.go delete mode 100644 pkg/util/meta/federatedobject.go diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index c4908fd8..c654dfa6 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -23,6 +23,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllermanager" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" + "k8s.io/klog/v2" ) func startFederateController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { @@ -34,6 +35,7 @@ func startFederateController(ctx context.Context, controllerCtx *controllerconte controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), controllerCtx.InformerManager, controllerCtx.Metrics, + klog.Background(), controllerCtx.WorkerCount, controllerCtx.FedSystemNamespace, ) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 3cb272b1..68de7781 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -29,8 +29,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/dynamic" - dynamicclient "k8s.io/client-go/dynamic" - kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -46,7 +45,6 @@ import ( finalizersutil "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" "github.com/kubewharf/kubeadmiral/pkg/util/logging" - "github.com/kubewharf/kubeadmiral/pkg/util/meta" "github.com/kubewharf/kubeadmiral/pkg/util/naming" "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" "github.com/kubewharf/kubeadmiral/pkg/util/worker" @@ -68,14 +66,12 @@ const ( RetainReplicasAnnotation = 
common.DefaultPrefix + "retain-replicas"
 )
 
-// FederateController federates objects of source type to objects of federated type
+// FederateController federates objects of source type to FederatedObjects or ClusterFederatedObjects.
 type FederateController struct {
 	informerManager          informermanager.InformerManager
 	fedObjectInformer        fedcorev1a1informers.FederatedObjectInformer
 	clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer
 
-	fedSystemNamespace string
-
 	fedClient     fedclient.Interface
 	dynamicClient dynamic.Interface
@@ -91,13 +87,14 @@ func (c *FederateController) IsControllerReady() bool {
 }
 
 func NewFederateController(
-	kubeClient kubeclient.Interface,
-	dynamicClient dynamicclient.Interface,
+	kubeClient kubernetes.Interface,
+	dynamicClient dynamic.Interface,
 	fedClient fedclient.Interface,
 	fedObjectInformer fedcorev1a1informers.FederatedObjectInformer,
 	clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer,
 	informerManager informermanager.InformerManager,
 	metrics stats.Metrics,
+	logger klog.Logger,
 	workerCount int,
 	fedSystemNamespace string,
 ) (*FederateController, error) {
@@ -105,13 +102,12 @@
 		informerManager:          informerManager,
 		fedObjectInformer:        fedObjectInformer,
 		clusterFedObjectInformer: clusterFedObjectInformer,
-		fedSystemNamespace:       fedSystemNamespace,
 		fedClient:                fedClient,
 		dynamicClient:            dynamicClient,
 		worker:                   nil,
 		eventRecorder:            nil,
 		metrics:                  metrics,
-		logger:                   klog.Background().WithValues("controller", FederateControllerName),
+		logger:                   logger.WithValues("controller", FederateControllerName),
 	}
 
 	c.eventRecorder = eventsink.NewDefederatingRecorderMux(kubeClient, FederateControllerName, 6)
@@ -129,6 +125,9 @@
 		Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler {
 			return cache.FilteringResourceEventHandler{
 				FilterFunc: func(obj interface{}) bool {
+					if deleted, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+						obj = deleted.Obj
+					}
 					uns := obj.(*unstructured.Unstructured)
 					return uns.GetNamespace() != fedSystemNamespace
 				},
@@ -148,6 +147,9 @@
 	if _, err := fedObjectInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
 		FilterFunc: func(obj interface{}) bool {
+			if deleted, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+				obj = deleted.Obj
+			}
 			fedObj := obj.(*fedcorev1a1.FederatedObject)
 			return fedObj.Namespace != fedSystemNamespace
 		},
@@ -155,7 +157,7 @@
 			fedObj := o.(*fedcorev1a1.FederatedObject)
 			logger := c.logger.WithValues("federated-object", common.NewQualifiedName(fedObj))
 
-			srcMeta, err := meta.GetSourceObjectMeta(fedObj)
+			srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured()
 			if err != nil {
 				logger.Error(err, "Failed to get source object's metadata from FederatedObject")
 				return
@@ -178,7 +180,7 @@
 			fedObj := o.(*fedcorev1a1.ClusterFederatedObject)
 			logger := c.logger.WithValues("cluster-federated-object", common.NewQualifiedName(fedObj))
 
-			srcMeta, err := meta.GetSourceObjectMeta(fedObj)
+			srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured()
 			if err != nil {
 				logger.Error(err, "Failed to get source object's metadata from ClusterFederatedObject")
 				return
@@ -220,7 +222,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (status worker.Result) {
 	_ = c.metrics.Rate("federate.throughput", 1)
 	ctx, logger := logging.InjectLogger(ctx, c.logger)
-	ctx, logger = 
logging.InjectLoggerValues(ctx, "source-object", key.String()) + ctx, logger = logging.InjectLoggerValues(ctx, "source-object", key.ObjectKey()) startTime := time.Now() logger.V(3).Info("Start reconcile") @@ -237,10 +239,9 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat logger.Error(nil, "FTC does not exist for GVK") return worker.StatusError } - ctx, logger = logging.InjectLoggerValues(ctx, "ftc", ftc.Name) sourceGVR := ftc.GetSourceTypeGVR() - ctx, logger = logging.InjectLoggerValues(ctx, "gvr", sourceGVR) + ctx, logger = logging.InjectLoggerValues(ctx, "ftc", ftc.Name, "gvr", sourceGVR) sourceObject, err := c.sourceObjectFromStore(key) if err != nil && apierrors.IsNotFound(err) { @@ -272,6 +273,37 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat fedObject = fedObject.DeepCopyGenericFederatedObject() } + if fedObject != nil { + // To account for the very small chance of name collision, we verify the owner reference before proceeding. + ownedbySource := false + requiresUpdate := -1 + + for i, ref := range fedObject.GetOwnerReferences() { + if schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind) == sourceGVK && + sourceObject.GetName() == ref.Name { + ownedbySource = true + + // Allow different UIDs to support adopting forcibly orphaned FederatedObjects. + if ref.UID != sourceObject.GetUID() { + requiresUpdate = i + } + + break + } + } + + if !ownedbySource { + logger.Error(nil, "Federated object not owned by source object, possible name collision detected") + return worker.StatusErrorNoRetry + } + + if requiresUpdate > -1 { + newRefs := fedObject.GetOwnerReferences() + newRefs[requiresUpdate].UID = sourceObject.GetUID() + fedObject.SetOwnerReferences(newRefs) + } + } + if sourceObject.GetDeletionTimestamp() != nil { logger.V(3).Info("Source object terminating") if err := c.handleTerminatingSourceObject(ctx, sourceGVR, sourceObject, fedObject); err != nil { diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 7d960ae7..88a3f6a8 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -17,6 +17,7 @@ limitations under the License. package federate import ( + "bytes" "encoding/json" "fmt" "reflect" @@ -47,7 +48,7 @@ type workerKey struct { gvk schema.GroupVersionKind } -func (k workerKey) String() string { +func (k workerKey) ObjectKey() string { return fmt.Sprintf("%s/%s", k.namespace, k.name) } @@ -118,9 +119,7 @@ func newFederatedObjectForSourceObject( if err != nil { return nil, fmt.Errorf("failed to marshal template: %w", err) } - fedObjSpec := &fedcorev1a1.GenericFederatedObjectSpec{} - fedObjSpec.Template.Raw = rawTemplate - fedObjSpec.DeepCopyInto(fedObj.GetSpec()) + fedObj.GetSpec().Template.Raw = rawTemplate // Generate the JSON patch required to convert the source object to the FederatedObject's template and store it as // an annotation in the FederatedObject. @@ -169,20 +168,21 @@ func updateFederatedObjectForSourceObject( federatedAnnotations, templateAnnotations := classifyAnnotations(sourceObject.GetAnnotations()) + // Record the observed label and annotation keys in an annotation on the FederatedObject. + + observedAnnotationKeys := generateObservedKeys(sourceObject.GetAnnotations(), federatedAnnotations) + observedLabelKeys := generateObservedKeys(sourceObject.GetLabels(), federatedLabels) + // Generate the FederatedObject's template and compare it to the template in the FederatedObject, updating the // FederatedObject if necessary. 
targetTemplate := templateForSourceObject(sourceObject, templateAnnotations, templateLabels) - foundTemplate := &unstructured.Unstructured{} - if err := json.Unmarshal(fedObject.GetSpec().Template.Raw, foundTemplate); err != nil { - return false, fmt.Errorf("failed to unmarshal template from federated object: %w", err) + rawTargetTemplate, err := targetTemplate.MarshalJSON() + if err != nil { + return false, fmt.Errorf("failed to marshal template: %w", err) } - if !reflect.DeepEqual(foundTemplate.Object, targetTemplate.Object) { - rawTargetTemplate, err := json.Marshal(targetTemplate) - if err != nil { - return false, fmt.Errorf("failed to marshal template: %w", err) - } - + + if !bytes.Equal(rawTargetTemplate, fedObject.GetSpec().Template.Raw) { fedObject.GetSpec().Template.Raw = rawTargetTemplate isUpdated = true } @@ -198,11 +198,6 @@ func updateFederatedObjectForSourceObject( }, ) - // Record the observed label and annotation keys in an annotation on the FederatedObject. - - observedAnnotationKeys := generateObservedKeys(sourceObject.GetAnnotations(), federatedAnnotations) - observedLabelKeys := generateObservedKeys(sourceObject.GetLabels(), federatedLabels) - // Generate the JSON patch required to convert the source object to the FederatedObject's template and store it as // an annotation in the FederatedObject. diff --git a/pkg/controllers/util/clusterutil.go b/pkg/controllers/util/clusterutil.go deleted file mode 100644 index 24c6940d..00000000 --- a/pkg/controllers/util/clusterutil.go +++ /dev/null @@ -1,206 +0,0 @@ -//go:build exclude -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. -*/ - -package util - -import ( - "context" - "fmt" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeclient "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" -) - -// User account keys -const ( - ClientCertificateKey = "client-certificate-data" - ClientKeyKey = "client-key-data" - CertificateAuthorityKey = "certificate-authority-data" -) - -// Service account keys -const ( - ServiceAccountTokenKey = "service-account-token-data" - ServiceAccountCAKey = "service-account-ca-data" -) - -// BuildClusterConfig returns a restclient.Config that can be used to configure -// a client for the given FederatedCluster or an error. 
-func BuildClusterConfig( - cluster *fedcorev1a1.FederatedCluster, - fedClient kubeclient.Interface, - restConfig *restclient.Config, - fedSystemNamespace string, -) (*restclient.Config, error) { - return buildClusterConfig( - cluster, - fedClient, - restConfig, - fedSystemNamespace, - cluster.Spec.UseServiceAccountToken, - ) -} - -// BuildRawClusterConfig returns a restclient.Config built using key and certificate -// credentials from the secret referenced in the FederatedCluster. -func BuildRawClusterConfig( - cluster *fedcorev1a1.FederatedCluster, - fedClient kubeclient.Interface, - restConfig *restclient.Config, - fedSystemNamespace string, -) (*restclient.Config, error) { - return buildClusterConfig( - cluster, - fedClient, - restConfig, - fedSystemNamespace, - false, - ) -} - -func buildClusterConfig( - cluster *fedcorev1a1.FederatedCluster, - fedClient kubeclient.Interface, - restConfig *restclient.Config, - fedSystemNamespace string, - useServiceAccountToken bool, -) (*restclient.Config, error) { - apiEndpoint := cluster.Spec.APIEndpoint - if len(apiEndpoint) == 0 { - return nil, fmt.Errorf("api endpoint of cluster %s is empty", cluster.Name) - } - - clusterConfig, err := clientcmd.BuildConfigFromFlags(apiEndpoint, "") - if err != nil { - return nil, err - } - - clusterConfig.QPS = restConfig.QPS - clusterConfig.Burst = restConfig.Burst - clusterConfig.UserAgent = restConfig.UserAgent - - secretName := cluster.Spec.SecretRef.Name - if len(secretName) == 0 { - clusterConfig.CAFile = restConfig.CAFile - clusterConfig.CertFile = restConfig.CertFile - clusterConfig.KeyFile = restConfig.KeyFile - return clusterConfig, nil - } - - secret, err := fedClient.CoreV1().Secrets(fedSystemNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - err = PopulateAuthDetailsFromSecret(clusterConfig, cluster.Spec.Insecure, secret, useServiceAccountToken) - if err != nil { - return nil, fmt.Errorf("cannot build rest config from cluster secret: %w", err) - } - return clusterConfig, nil -} - -func PopulateAuthDetailsFromSecret( - clusterConfig *restclient.Config, - insecure bool, - secret *corev1.Secret, - useServiceAccount bool, -) error { - var exists bool - - if useServiceAccount { - serviceAccountToken, exists := secret.Data[ServiceAccountTokenKey] - if !exists { - return fmt.Errorf("%q data is missing from secret", ServiceAccountTokenKey) - } - clusterConfig.BearerToken = string(serviceAccountToken) - - if insecure { - clusterConfig.Insecure = true - } else { - clusterConfig.CAData, exists = secret.Data[ServiceAccountCAKey] - if !exists { - return fmt.Errorf("%q data is missing from secret and insecure is false", ServiceAccountCAKey) - } - } - } else { - clusterConfig.CertData, exists = secret.Data[ClientCertificateKey] - if !exists { - return fmt.Errorf("%q data is missing from secret", ClientCertificateKey) - } - - clusterConfig.KeyData, exists = secret.Data[ClientKeyKey] - if !exists { - return fmt.Errorf("%q data is missing from secret", ClientKeyKey) - } - - if insecure { - clusterConfig.Insecure = true - } else { - clusterConfig.CAData, exists = secret.Data[CertificateAuthorityKey] - if !exists { - return fmt.Errorf("%q data is missing from secret", CertificateAuthorityKey) - } - } - } - - return nil -} - -// BuildClusterConfig returns a restclient.Config that can be used to configure -// a client for the given FederatedCluster or an error. 
-func BuildClusterConfigWithGenericClient( - cluster *fedcorev1a1.FederatedCluster, - fedClient generic.Client, - restConfig *restclient.Config, - fedSystemNamespace string, -) (*restclient.Config, error) { - apiEndpoint := cluster.Spec.APIEndpoint - if len(apiEndpoint) == 0 { - return nil, fmt.Errorf("api endpoint of cluster %s is empty", cluster.Name) - } - - clusterConfig, err := clientcmd.BuildConfigFromFlags(apiEndpoint, "") - if err != nil { - return nil, err - } - - clusterConfig.QPS = restConfig.QPS - clusterConfig.Burst = restConfig.Burst - - secret := &corev1.Secret{} - err = fedClient.Get(context.TODO(), secret, fedSystemNamespace, cluster.Spec.SecretRef.Name) - if err != nil { - return nil, err - } - - err = PopulateAuthDetailsFromSecret(clusterConfig, cluster.Spec.Insecure, secret, cluster.Spec.UseServiceAccountToken) - if err != nil { - return nil, fmt.Errorf("cannot build rest config from cluster secret: %w", err) - } - - return clusterConfig, nil -} diff --git a/pkg/util/meta/federatedobject.go b/pkg/util/meta/federatedobject.go deleted file mode 100644 index e4d76042..00000000 --- a/pkg/util/meta/federatedobject.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. 
-*/ - -package meta - -import ( - "encoding/json" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" -) - -func GetSourceObjectMeta(fedObject fedcorev1a1.GenericFederatedObject) (*metav1.PartialObjectMetadata, error) { - partialObjectMeta := &metav1.PartialObjectMetadata{} - if err := json.Unmarshal(fedObject.GetSpec().Template.Raw, partialObjectMeta); err != nil { - return nil, fmt.Errorf("failed to unmarshal FederatedObject's template: %w", err) - } - return partialObjectMeta, nil -} diff --git a/pkg/util/naming/naming_test.go b/pkg/util/naming/naming_test.go index 13b0060e..9c4af59b 100644 --- a/pkg/util/naming/naming_test.go +++ b/pkg/util/naming/naming_test.go @@ -41,6 +41,14 @@ func TestGenerateFederatedObjectName(t *testing.T) { }, want: "foo-roles.rbac.authorization.k8s.io", }, + { + name: "generate federated object name with consecutive .", + args: args{ + objectName: "system...foo", + ftcName: "roles.rbac.authorization.k8s.io", + }, + want: "system.foo-roles.rbac.authorization.k8s.io-1857674172", + }, { name: "generate federated object name with :", args: args{ From 44ea95ad5b7515836800a895468a5170f3abdd11 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Wed, 19 Jul 2023 19:05:14 +0800 Subject: [PATCH 072/173] revert to comparing unstructured --- pkg/controllers/federate/util.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 88a3f6a8..7596a202 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -17,7 +17,6 @@ limitations under the License. package federate import ( - "bytes" "encoding/json" "fmt" "reflect" @@ -114,8 +113,8 @@ func newFederatedObjectForSourceObject( // Generate the FederatedObject's template and update the FederatedObject. - templateObject := templateForSourceObject(sourceObj, templateAnnotations, templateLabels).Object - rawTemplate, err := json.Marshal(templateObject) + templateObject := templateForSourceObject(sourceObj, templateAnnotations, templateLabels) + rawTemplate, err := templateObject.MarshalJSON() if err != nil { return nil, fmt.Errorf("failed to marshal template: %w", err) } @@ -124,7 +123,10 @@ func newFederatedObjectForSourceObject( // Generate the JSON patch required to convert the source object to the FederatedObject's template and store it as // an annotation in the FederatedObject. - templateGeneratorMergePatch, err := CreateMergePatch(sourceObj, &unstructured.Unstructured{Object: templateObject}) + templateGeneratorMergePatch, err := CreateMergePatch( + sourceObj, + &unstructured.Unstructured{Object: templateObject.Object}, + ) if err != nil { return nil, fmt.Errorf("failed to create merge patch for source object: %w", err) } @@ -177,12 +179,16 @@ func updateFederatedObjectForSourceObject( // FederatedObject if necessary. 
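The subject of this patch, "revert to comparing unstructured", hints at why the byte-level comparison from the previous commit is fragile: once Template.Raw has round-tripped through the API server it may be re-encoded with different key order or whitespace, so two semantically identical templates can stop being byte-equal and would trigger no-op updates. A small self-contained illustration of the difference between the two comparisons (the JSON literals are invented for the demo):

package main

import (
	"bytes"
	"fmt"
	"reflect"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// The same object serialized twice with different key order and spacing.
	a := []byte(`{"kind":"ConfigMap","apiVersion":"v1"}`)
	b := []byte(`{"apiVersion": "v1", "kind": "ConfigMap"}`)

	// Byte comparison reports a difference even though nothing changed.
	fmt.Println(bytes.Equal(a, b)) // false

	// Decoding into unstructured maps and deep-comparing, as the reverted
	// code below does, treats the two encodings as equal.
	ua, ub := &unstructured.Unstructured{}, &unstructured.Unstructured{}
	if err := ua.UnmarshalJSON(a); err != nil {
		panic(err)
	}
	if err := ub.UnmarshalJSON(b); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(ua.Object, ub.Object)) // true
}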
targetTemplate := templateForSourceObject(sourceObject, templateAnnotations, templateLabels) - rawTargetTemplate, err := targetTemplate.MarshalJSON() - if err != nil { - return false, fmt.Errorf("failed to marshal template: %w", err) + foundTemplate := &unstructured.Unstructured{} + if err := foundTemplate.UnmarshalJSON(fedObject.GetSpec().Template.Raw); err != nil { + return false, fmt.Errorf("failed to unmarshal template from federated object: %w", err) } - - if !bytes.Equal(rawTargetTemplate, fedObject.GetSpec().Template.Raw) { + if !reflect.DeepEqual(foundTemplate.Object, targetTemplate.Object) { + rawTargetTemplate, err := targetTemplate.MarshalJSON() + if err != nil { + return false, fmt.Errorf("failed to marshal template: %w", err) + } + fedObject.GetSpec().Template.Raw = rawTargetTemplate isUpdated = true } From 187dcfbaadfed2359daddc7c3a75069e42c995c9 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 20 Jul 2023 09:14:58 +0800 Subject: [PATCH 073/173] fix lint errors --- cmd/controller-manager/app/core.go | 8 ++++++-- pkg/util/eventhandlers/eventhandler.go | 16 ++++++++++++++++ pkg/util/logging/logging.go | 2 +- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index c654dfa6..31feff65 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -20,13 +20,17 @@ import ( "context" "fmt" + "k8s.io/klog/v2" + "github.com/kubewharf/kubeadmiral/pkg/controllermanager" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" - "k8s.io/klog/v2" ) -func startFederateController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { +func startFederateController( + ctx context.Context, + controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { federateController, err := federate.NewFederateController( controllerCtx.KubeClientset, controllerCtx.DynamicClientset, diff --git a/pkg/util/eventhandlers/eventhandler.go b/pkg/util/eventhandlers/eventhandler.go index 2aed6004..a5dbd3ae 100644 --- a/pkg/util/eventhandlers/eventhandler.go +++ b/pkg/util/eventhandlers/eventhandler.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package eventhandlers import ( diff --git a/pkg/util/logging/logging.go b/pkg/util/logging/logging.go index 0c9e5090..cee4496e 100644 --- a/pkg/util/logging/logging.go +++ b/pkg/util/logging/logging.go @@ -24,7 +24,7 @@ import ( ) func InjectLogger(ctx context.Context, logger klog.Logger) (context.Context, logr.Logger) { - ctx = klog.NewContext(ctx, logger) + ctx = klog.NewContext(ctx, logger) return ctx, logger } From a25da20b361c7b8884ea7ee8758df178629ba980 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 20 Jul 2023 11:21:17 +0800 Subject: [PATCH 074/173] remove unnecessary owner ref update --- pkg/controllers/federate/controller.go | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 68de7781..763e0928 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -276,18 +276,11 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat if fedObject != nil { // To account for the very small chance of name collision, we verify the owner reference before proceeding. ownedbySource := false - requiresUpdate := -1 - for i, ref := range fedObject.GetOwnerReferences() { + for _, ref := range fedObject.GetOwnerReferences() { if schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind) == sourceGVK && sourceObject.GetName() == ref.Name { ownedbySource = true - - // Allow different UIDs to support adopting forcibly orphaned FederatedObjects. - if ref.UID != sourceObject.GetUID() { - requiresUpdate = i - } - break } } @@ -296,12 +289,6 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat logger.Error(nil, "Federated object not owned by source object, possible name collision detected") return worker.StatusErrorNoRetry } - - if requiresUpdate > -1 { - newRefs := fedObject.GetOwnerReferences() - newRefs[requiresUpdate].UID = sourceObject.GetUID() - fedObject.SetOwnerReferences(newRefs) - } } if sourceObject.GetDeletionTimestamp() != nil { From 7b116e294046c9d86eafd965854d775d24ec315a Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 20 Jul 2023 18:51:57 +0800 Subject: [PATCH 075/173] fix(federate-controller): handle case where ftc does not exist --- pkg/controllers/federate/controller.go | 69 ++++++++++++++++---------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 763e0928..94a8647f 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/pointer" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" @@ -236,16 +237,42 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat ftc, exists := c.informerManager.GetResourceFTC(key.gvk) if !exists { - logger.Error(nil, "FTC does not exist for GVK") - return worker.StatusError + // This could happen if: + // 1) The InformerManager is not yet up-to-date. + // 2) We received an event from a FederatedObject without a corresponding FTC. + // + // For case 1, when the InformerManager becomes up-to-date, all the source objects will be enqueued once anyway, + // so it is safe to skip processing this time round. We do not have to process orphaned FederatedObjects. 
+ // For case 2, the federate controller does not have to process FederatedObjects without a corresponding FTC. + logger.V(3).Info("FTC does not exist for GVK") + return worker.StatusAllOK } sourceGVR := ftc.GetSourceTypeGVR() ctx, logger = logging.InjectLoggerValues(ctx, "ftc", ftc.Name, "gvr", sourceGVR) - sourceObject, err := c.sourceObjectFromStore(key) + lister, hasSynced, exists := c.informerManager.GetResourceLister(key.gvk) + if !exists { + // Once again, this could happen if: + // 1) The InformerManager is not yet up-to-date. + // 2) We received an event from a FederatedObject without a corresponding FTC. + // + // See above comment for an explanation of the handling logic. + logger.V(3).Info("Lister does not exist for GVK") + return worker.StatusAllOK + } + if !hasSynced() { + // If lister has not synced, simply reenqueue after a short delay + logger.V(2).Info("Lister not yet synced, will reenqueue") + return worker.Result{ + Success: true, + RequeueAfter: pointer.Duration(100 * time.Millisecond), + } + } + + sourceObject, err := getSourceObjectFromLister(lister, key) if err != nil && apierrors.IsNotFound(err) { - logger.V(3).Info(fmt.Sprintf("No source object for found, skip federating")) + logger.V(3).Info(fmt.Sprintf("No source object found, skip federating")) return worker.StatusAllOK } if err != nil { @@ -331,7 +358,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat ) if apierrors.IsInvalid(err) { - // if the federated object template is invalid, reenqueueing will not help solve the problem. instead, + // If the federated object template is invalid, reenqueueing will not help solve the problem. Instead, // we should wait for the source object template to be updated - which will trigger its own reconcile. 
			return worker.StatusErrorNoRetry
		}

@@ -378,27 +405,6 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat
 	return worker.StatusAllOK
 }

-func (c *FederateController) sourceObjectFromStore(key workerKey) (*unstructured.Unstructured, error) {
-	lister, hasSynced, exists := c.informerManager.GetResourceLister(key.gvk)
-	if !exists {
-		return nil, fmt.Errorf("lister for %s does not exist", key.gvk)
-	}
-	if !hasSynced() {
-		return nil, fmt.Errorf("lister for %s not synced", key.gvk)
-	}
-
-	var obj runtime.Object
-	var err error
-
-	if key.namespace == "" {
-		obj, err = lister.Get(key.name)
-	} else {
-		obj, err = lister.ByNamespace(key.namespace).Get(key.name)
-	}
-
-	return obj.(*unstructured.Unstructured), err
-}
-
 func (c *FederateController) ensureFinalizer(
 	ctx context.Context,
 	sourceGVR schema.GroupVersionResource,
@@ -551,3 +557,19 @@ func (c *FederateController) handleExistingFederatedObject(

 	return true, nil
 }
+
+func getSourceObjectFromLister(lister cache.GenericLister, key workerKey) (*unstructured.Unstructured, error) {
+	var obj runtime.Object
+	var err error
+	if key.namespace == "" {
+		obj, err = lister.Get(key.name)
+	} else {
+		obj, err = lister.ByNamespace(key.namespace).Get(key.name)
+	}
+	// Check the error before asserting: on a not-found error the lister
+	// returns a nil object, and asserting its concrete type would panic.
+	if err != nil {
+		return nil, err
+	}
+	return obj.(*unstructured.Unstructured), nil
+}

From 9e6b6da881947ac6dc87ca7416746a0b02270855 Mon Sep 17 00:00:00 2001
From: "hawjia.lim"
Date: Thu, 20 Jul 2023 18:56:25 +0800
Subject: [PATCH 076/173] fix(federate-controller): remove unnecessary log

---
 pkg/controllers/federate/controller.go | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go
index 94a8647f..f58cf80c 100644
--- a/pkg/controllers/federate/controller.go
+++ b/pkg/controllers/federate/controller.go
@@ -244,7 +244,6 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat
 	// For case 1, when the InformerManager becomes up-to-date, all the source objects will be enqueued once anyway,
 	// so it is safe to skip processing this time round. We do not have to process orphaned FederatedObjects.
 	// For case 2, the federate controller does not have to process FederatedObjects without a corresponding FTC.
-	logger.V(3).Info("FTC does not exist for GVK")
 	return worker.StatusAllOK
 }

@@ -258,12 +257,11 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat
 	// 2) We received an event from a FederatedObject without a corresponding FTC.
 	//
 	// See above comment for an explanation of the handling logic.
-	logger.V(3).Info("Lister does not exist for GVK")
 	return worker.StatusAllOK
 }
 if !hasSynced() {
-	// If lister has not synced, simply reenqueue after a short delay
-	logger.V(2).Info("Lister not yet synced, will reenqueue")
+	// If lister is not yet synced, simply reenqueue after a short delay.
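The short-delay path described in the comment above returns a successful worker.Result carrying a RequeueAfter instead of returning an error, presumably so the retry is not subject to the error backoff; the Result literal is visible in the remainder of the hunk. A sketch of that contract with local stand-ins for the project's worker types (the field names mirror the usage in this hunk):

package main

import (
	"fmt"
	"time"

	"k8s.io/utils/pointer"
)

// result mirrors the shape of the worker.Result used above: a reconcile can
// succeed and still ask to be run again after a fixed delay.
type result struct {
	Success      bool
	RequeueAfter *time.Duration
}

// requeueShortly is the pattern for a lister that has not synced yet: report
// success (no error backoff) but retry after a short, fixed delay.
func requeueShortly() result {
	return result{
		Success:      true,
		RequeueAfter: pointer.Duration(100 * time.Millisecond),
	}
}

func main() {
	r := requeueShortly()
	fmt.Println(r.Success, *r.RequeueAfter) // true 100ms
}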
+ logger.V(3).Info("Lister for source type not yet synced, will reenqueue") return worker.Result{ Success: true, RequeueAfter: pointer.Duration(100 * time.Millisecond), From 1f422a8205a0122ea4e27ac15e598fbf83bd3361 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Thu, 20 Jul 2023 19:21:34 +0800 Subject: [PATCH 077/173] chore(config): add no-federated-resource annotation to crds --- ...beadmiral.io_clustercollectedstatuses.yaml | 94 ++--- ...ubeadmiral.io_clusterfederatedobjects.yaml | 380 ++++++++--------- ...ubeadmiral.io_clusteroverridepolicies.yaml | 324 +++++++------- ...eadmiral.io_clusterpropagatedversions.yaml | 105 +++-- ...admiral.io_clusterpropagationpolicies.yaml | 1 + ...core.kubeadmiral.io_collectedstatuses.yaml | 94 ++--- ...core.kubeadmiral.io_federatedclusters.yaml | 360 ++++++++-------- .../core.kubeadmiral.io_federatedobjects.yaml | 380 ++++++++--------- ...e.kubeadmiral.io_federatedtypeconfigs.yaml | 276 ++++++------ .../core.kubeadmiral.io_overridepolicies.yaml | 324 +++++++------- ...ore.kubeadmiral.io_propagatedversions.yaml | 105 +++-- ...re.kubeadmiral.io_propagationpolicies.yaml | 1 + ..._schedulerpluginwebhookconfigurations.yaml | 159 +++---- ...ore.kubeadmiral.io_schedulingprofiles.yaml | 398 ++++++++---------- hack/generate-groups.sh | 10 +- 15 files changed, 1361 insertions(+), 1650 deletions(-) diff --git a/config/crds/core.kubeadmiral.io_clustercollectedstatuses.yaml b/config/crds/core.kubeadmiral.io_clustercollectedstatuses.yaml index 5159d603..5ed6d579 100644 --- a/config/crds/core.kubeadmiral.io_clustercollectedstatuses.yaml +++ b/config/crds/core.kubeadmiral.io_clustercollectedstatuses.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: clustercollectedstatuses.core.kubeadmiral.io spec: @@ -13,58 +14,49 @@ spec: listKind: ClusterCollectedStatusList plural: clustercollectedstatuses shortNames: - - ccs + - ccs singular: clustercollectedstatus scope: Cluster versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: ClusterCollectedStatus stores the collected fields of Kubernetes - objects from member clusters, that are propagated by a ClusterFederatedObject. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - clusters: - description: Clusters is the list of member clusters and collected fields - for its propagated Kubernetes object. - items: - description: CollectedFieldsWithCluster stores the collected fields - of a Kubernetes object in a member cluster. - properties: - cluster: - description: Cluster is the name of the member cluster. - type: string - collectedFields: - description: CollectedFields is the the set of fields collected - for the Kubernetes object. - x-kubernetes-preserve-unknown-fields: true - error: - description: Error records any errors encountered while collecting - fields from the cluster. - type: string - required: - - cluster - - collectedFields + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterCollectedStatus stores the collected fields of Kubernetes objects from member clusters, that are propagated by a ClusterFederatedObject. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + clusters: + description: Clusters is the list of member clusters and collected fields for its propagated Kubernetes object. + items: + description: CollectedFieldsWithCluster stores the collected fields of a Kubernetes object in a member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + collectedFields: + description: CollectedFields is the the set of fields collected for the Kubernetes object. + x-kubernetes-preserve-unknown-fields: true + error: + description: Error records any errors encountered while collecting fields from the cluster. + type: string + required: + - cluster + - collectedFields + type: object + type: array + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + lastUpdateTime: + description: LastUpdateTime is the last time that a collection was performed. + format: date-time + type: string + metadata: type: object - type: array - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - lastUpdateTime: - description: LastUpdateTime is the last time that a collection was performed. - format: date-time - type: string - metadata: - type: object - required: - - clusters - - lastUpdateTime - type: object - served: true - storage: true + required: + - clusters + - lastUpdateTime + type: object + served: true + storage: true diff --git a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml index 42c354c7..4f361f67 100644 --- a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml +++ b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: clusterfederatedobjects.core.kubeadmiral.io spec: @@ -13,213 +14,178 @@ spec: listKind: ClusterFederatedObjectList plural: clusterfederatedobjects shortNames: - - cfo + - cfo singular: clusterfederatedobject scope: Cluster versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: ClusterFederatedObject describes a cluster-scoped Kubernetes - object and how it should be propagated to different member clusters. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the desired behavior of the FederatedObject. - properties: - follows: - description: Follows defines other objects, or "leaders", that the - Kubernetes object should follow during propagation, i.e. the Kubernetes - object should be propagated to all member clusters that its "leaders" - are placed in. - items: - description: LeaderReference contains the identifying metadata of - a "leader" Kubernetes object. - properties: - group: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - required: - - kind - - name - type: object - type: array - overrides: - description: Overrides describe the overrides that should be applied - to the base template of the Kubernetes object before it is propagated - to individual member clusters. - items: - description: OverrideWithController describes the overrides that - will be applied to a Kubernetes object before it is propagated - to individual member clusters. - properties: - clusters: - description: Override is the list of member clusters and their - respective override patches. - items: - description: ClusterReferenceWithPatches represents a single - member cluster and a list of override patches for the cluster. - properties: - cluster: - description: Cluster is the name of the member cluster. - type: string - patches: - description: Patches is the list of override patches for - the member cluster. - items: - description: OverridePatch defines a JSON patch. - properties: - op: - type: string - path: - type: string - value: - x-kubernetes-preserve-unknown-fields: true - required: - - path - type: object - type: array - required: - - cluster - type: object - type: array - controller: - description: Controller identifies the controller responsible - for this override. - type: string - required: - - clusters - - controller - type: object - type: array - placements: - description: Placements describe the member clusters that the Kubernetes - object will be propagated to, which is a union of all the listed - clusters. - items: - description: PlacementWithController describes the member clusters - that a Kubernetes object should be propagated to. - properties: - controller: - description: Controller identifies the controller responsible - for this placement. - type: string - placement: - description: Placement is the list of member clusters that the - Kubernetes object should be propagated to. - items: - description: ClusterReference represents a single member cluster. - properties: - cluster: - description: Cluster is the name of the member cluster. - type: string - required: - - cluster - type: object - type: array - required: - - controller - - placement - type: object - type: array - template: - description: Template is the base template of the Kubernetes object - to be propagated. - x-kubernetes-preserve-unknown-fields: true - required: - - template - type: object - status: - description: Status describes the most recently observed status of the - FederatedObject. 
- properties: - clusters: - description: Clusters contains the propagation status of the Kubernetes - object for individual member clusters. - items: - description: PropagationStatus describes the propagation of a Kubernetes - object to a given member cluster. - properties: - cluster: - description: Cluster is the name of the member cluster. - type: string - lastObservedGeneration: - description: LastObservedGeneration is the last observed generation - of the Kubernetes object in the member cluster. - format: int64 - type: integer - status: - description: Status describes the current status of propagating - the Kubernetes object to the member cluster. - type: string - required: - - cluster - - status - type: object - type: array - collisionCount: - description: CollisionCount can be used in conjunction with RevisionHistory - to implement rollbacks. - format: int32 - type: integer - conditions: - description: Conditions describe the current state of this FederatedObject. - items: - description: GenericFederatedObjectCondition contains the current - details about a particular condition of a FederatedObject. - properties: - lastTransitionTime: - description: LastTransitionTime is the last time the status - of this condition changed. - format: date-time - type: string - lastUpdateTime: - description: LastUpdateTime is the last time a reconciliation - for this condition occurred. - format: date-time - type: string - reason: - description: Reason is the reason for the last status change - of this condition. - type: string - status: - description: Status is the status of the condition, one of True, - False or Unknown. - type: string - type: - description: Type is the type of the condition. - type: string - required: - - status - - type - type: object - type: array - syncedGeneration: - description: SyncedGeneration is the generation of this FederatedObject - when it was last synced to selected member clusters. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterFederatedObject describes a cluster-scoped Kubernetes object and how it should be propagated to different member clusters. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired behavior of the FederatedObject. + properties: + follows: + description: Follows defines other objects, or "leaders", that the Kubernetes object should follow during propagation, i.e. the Kubernetes object should be propagated to all member clusters that its "leaders" are placed in. + items: + description: LeaderReference contains the identifying metadata of a "leader" Kubernetes object. 
+ properties: + group: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + type: array + overrides: + description: Overrides describe the overrides that should be applied to the base template of the Kubernetes object before it is propagated to individual member clusters. + items: + description: OverrideWithController describes the overrides that will be applied to a Kubernetes object before it is propagated to individual member clusters. + properties: + clusters: + description: Override is the list of member clusters and their respective override patches. + items: + description: ClusterReferenceWithPatches represents a single member cluster and a list of override patches for the cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + patches: + description: Patches is the list of override patches for the member cluster. + items: + description: OverridePatch defines a JSON patch. + properties: + op: + type: string + path: + type: string + value: + x-kubernetes-preserve-unknown-fields: true + required: + - path + type: object + type: array + required: + - cluster + type: object + type: array + controller: + description: Controller identifies the controller responsible for this override. + type: string + required: + - clusters + - controller + type: object + type: array + placements: + description: Placements describe the member clusters that the Kubernetes object will be propagated to, which is a union of all the listed clusters. + items: + description: PlacementWithController describes the member clusters that a Kubernetes object should be propagated to. + properties: + controller: + description: Controller identifies the controller responsible for this placement. + type: string + placement: + description: Placement is the list of member clusters that the Kubernetes object should be propagated to. + items: + description: ClusterReference represents a single member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + required: + - cluster + type: object + type: array + required: + - controller + - placement + type: object + type: array + template: + description: Template is the base template of the Kubernetes object to be propagated. + x-kubernetes-preserve-unknown-fields: true + required: + - template + type: object + status: + description: Status describes the most recently observed status of the FederatedObject. + properties: + clusters: + description: Clusters contains the propagation status of the Kubernetes object for individual member clusters. + items: + description: PropagationStatus describes the propagation of a Kubernetes object to a given member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + lastObservedGeneration: + description: LastObservedGeneration is the last observed generation of the Kubernetes object in the member cluster. + format: int64 + type: integer + status: + description: Status describes the current status of propagating the Kubernetes object to the member cluster. + type: string + required: + - cluster + - status + type: object + type: array + collisionCount: + description: CollisionCount can be used in conjunction with RevisionHistory to implement rollbacks. + format: int32 + type: integer + conditions: + description: Conditions describe the current state of this FederatedObject. 
+ items: + description: GenericFederatedObjectCondition contains the current details about a particular condition of a FederatedObject. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the status of this condition changed. + format: date-time + type: string + lastUpdateTime: + description: LastUpdateTime is the last time a reconciliation for this condition occurred. + format: date-time + type: string + reason: + description: Reason is the reason for the last status change of this condition. + type: string + status: + description: Status is the status of the condition, one of True, False or Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + syncedGeneration: + description: SyncedGeneration is the generation of this FederatedObject when it was last synced to selected member clusters. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_clusteroverridepolicies.yaml b/config/crds/core.kubeadmiral.io_clusteroverridepolicies.yaml index 1d918ecf..5d8bcdea 100644 --- a/config/crds/core.kubeadmiral.io_clusteroverridepolicies.yaml +++ b/config/crds/core.kubeadmiral.io_clusteroverridepolicies.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: clusteroverridepolicies.core.kubeadmiral.io spec: @@ -13,187 +14,154 @@ spec: listKind: ClusterOverridePolicyList plural: clusteroverridepolicies shortNames: - - cop + - cop singular: clusteroverridepolicy scope: Cluster versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: ClusterOverridePolicy describes the override rules for a resource. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - overrideRules: - description: OverrideRules specify the override rules. Each rule specifies - the overriders and the clusters these overriders should be applied - to. - items: - properties: - overriders: - description: Overriders specify the overriders to be applied - in the target clusters. - properties: - jsonpatch: - description: JsonPatch specifies overriders in a syntax - similar to RFC6902 JSON Patch. - items: - properties: - operator: - description: Operator specifies the operation. If - omitted, defaults to "replace". - type: string - path: - description: Path is a JSON pointer (RFC 6901) specifying - the location within the resource document where - the operation is performed. Each key in the path - should be prefixed with "/", while "~" and "/" should - be escaped as "~0" and "~1" respectively. 
For example, - to add a label "kubeadmiral.io/label", the path - should be "/metadata/labels/kubeadmiral.io~1label". - type: string - value: - description: Value is the value(s) required by the - operation. - x-kubernetes-preserve-unknown-fields: true - required: - - path - type: object - type: array - type: object - targetClusters: - description: TargetClusters selects the clusters in which the - overriders in this rule should be applied. If multiple types - of selectors are specified, the overall result is the intersection - of all of them. - properties: - clusterAffinity: - description: ClusterAffinity selects FederatedClusters by - matching their labels and fields against expressions. - If multiple terms are specified, their results are ORed. - items: - properties: - matchExpressions: - description: A list of cluster selector requirements - by cluster labels. - items: - description: ClusterSelectorRequirement is a selector - that contains values, a key, and an operator that - relates the values and keys - properties: - key: - type: string - operator: - description: ClusterSelectorOperator is the - set of operators that can be used in a cluster - selector requirement. - enum: - - In - - NotIn - - Exists - - DoesNotExist - - Gt - - Lt - type: string - values: - items: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterOverridePolicy describes the override rules for a resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + overrideRules: + description: OverrideRules specify the override rules. Each rule specifies the overriders and the clusters these overriders should be applied to. + items: + properties: + overriders: + description: Overriders specify the overriders to be applied in the target clusters. + properties: + jsonpatch: + description: JsonPatch specifies overriders in a syntax similar to RFC6902 JSON Patch. + items: + properties: + operator: + description: Operator specifies the operation. If omitted, defaults to "replace". + type: string + path: + description: Path is a JSON pointer (RFC 6901) specifying the location within the resource document where the operation is performed. Each key in the path should be prefixed with "/", while "~" and "/" should be escaped as "~0" and "~1" respectively. For example, to add a label "kubeadmiral.io/label", the path should be "/metadata/labels/kubeadmiral.io~1label". + type: string + value: + description: Value is the value(s) required by the operation. + x-kubernetes-preserve-unknown-fields: true + required: + - path + type: object + type: array + type: object + targetClusters: + description: TargetClusters selects the clusters in which the overriders in this rule should be applied. If multiple types of selectors are specified, the overall result is the intersection of all of them. 
+ properties: + clusterAffinity: + description: ClusterAffinity selects FederatedClusters by matching their labels and fields against expressions. If multiple terms are specified, their results are ORed. + items: + properties: + matchExpressions: + description: A list of cluster selector requirements by cluster labels. + items: + description: ClusterSelectorRequirement is a selector that contains values, a key, and an operator that relates the values and keys + properties: + key: + type: string + operator: + description: ClusterSelectorOperator is the set of operators that can be used in a cluster selector requirement. + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt + type: string + values: + items: + type: string + type: array + required: + - key + - operator + - values + type: object + type: array + matchFields: + description: A list of cluster selector requirements by cluster fields. + items: + description: ClusterSelectorRequirement is a selector that contains values, a key, and an operator that relates the values and keys + properties: + key: type: string - type: array - required: - - key - - operator - - values - type: object - type: array - matchFields: - description: A list of cluster selector requirements - by cluster fields. - items: - description: ClusterSelectorRequirement is a selector - that contains values, a key, and an operator that - relates the values and keys - properties: - key: - type: string - operator: - description: ClusterSelectorOperator is the - set of operators that can be used in a cluster - selector requirement. - enum: - - In - - NotIn - - Exists - - DoesNotExist - - Gt - - Lt - type: string - values: - items: + operator: + description: ClusterSelectorOperator is the set of operators that can be used in a cluster selector requirement. + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt type: string - type: array - required: - - key - - operator - - values - type: object - type: array + values: + items: + type: string + type: array + required: + - key + - operator + - values + type: object + type: array + type: object + type: array + clusterSelector: + additionalProperties: + type: string + description: ClusterSelector selects FederatedClusters by their labels. Empty labels selects all FederatedClusters. type: object - type: array - clusterSelector: - additionalProperties: - type: string - description: ClusterSelector selects FederatedClusters by - their labels. Empty labels selects all FederatedClusters. - type: object - clusters: - description: Clusters selects FederatedClusters by their - names. Empty Clusters selects all FederatedClusters. - items: - type: string - type: array - type: object - type: object - type: array - type: object - status: - properties: - refCount: - format: int64 - minimum: 0 - type: integer - typedRefCount: - items: - properties: - count: - format: int64 - minimum: 0 - type: integer - group: - type: string - resource: - type: string - required: - - count - - resource - type: object - type: array - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} + clusters: + description: Clusters selects FederatedClusters by their names. Empty Clusters selects all FederatedClusters. 
+ items: + type: string + type: array + type: object + type: object + type: array + type: object + status: + properties: + refCount: + format: int64 + minimum: 0 + type: integer + typedRefCount: + items: + properties: + count: + format: int64 + minimum: 0 + type: integer + group: + type: string + resource: + type: string + required: + - count + - resource + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml b/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml index 1b5c8f35..a1b58028 100644 --- a/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml +++ b/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: clusterpropagatedversions.core.kubeadmiral.io spec: @@ -15,61 +16,49 @@ spec: singular: clusterpropagatedversion scope: Cluster versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: ClusterPropagatedVersion holds version information about the - state propagated from KubeFed APIs (configured by FederatedTypeConfig resources) - to member clusters. The name of a ClusterPropagatedVersion encodes the kind - and name of the resource it stores information for (i.e. -). If a target resource has a populated metadata.Generation field, - the generation will be stored with a prefix of `gen:` as the version for - the cluster. If metadata.Generation is not available, metadata.ResourceVersion - will be stored with a prefix of `rv:` as the version for the cluster. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - status: - description: PropagatedVersionStatus defines the observed state of PropagatedVersion - properties: - clusterVersions: - description: The last versions produced in each cluster for this resource. - items: - properties: - clusterName: - description: The name of the cluster the version is for. - type: string - version: - description: The last version produced for the resource by a - KubeFed operation. - type: string - required: - - clusterName - - version - type: object - type: array - overridesVersion: - description: The observed version of the overrides for this resource. - type: string - templateVersion: - description: The observed version of the template for this resource. 
- type: string - required: - - overridesVersion - - templateVersion - type: object - type: object - served: true - storage: true - subresources: - status: {} + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterPropagatedVersion holds version information about the state propagated from KubeFed APIs (configured by FederatedTypeConfig resources) to member clusters. The name of a ClusterPropagatedVersion encodes the kind and name of the resource it stores information for (i.e. -). If a target resource has a populated metadata.Generation field, the generation will be stored with a prefix of `gen:` as the version for the cluster. If metadata.Generation is not available, metadata.ResourceVersion will be stored with a prefix of `rv:` as the version for the cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + status: + description: PropagatedVersionStatus defines the observed state of PropagatedVersion + properties: + clusterVersions: + description: The last versions produced in each cluster for this resource. + items: + properties: + clusterName: + description: The name of the cluster the version is for. + type: string + version: + description: The last version produced for the resource by a KubeFed operation. + type: string + required: + - clusterName + - version + type: object + type: array + overridesVersion: + description: The observed version of the overrides for this resource. + type: string + templateVersion: + description: The observed version of the template for this resource. 
+ type: string + required: + - overridesVersion + - templateVersion + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml b/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml index 828a179f..f3707846 100644 --- a/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml +++ b/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: clusterpropagationpolicies.core.kubeadmiral.io spec: diff --git a/config/crds/core.kubeadmiral.io_collectedstatuses.yaml b/config/crds/core.kubeadmiral.io_collectedstatuses.yaml index 815d4ed2..a455bd20 100644 --- a/config/crds/core.kubeadmiral.io_collectedstatuses.yaml +++ b/config/crds/core.kubeadmiral.io_collectedstatuses.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: collectedstatuses.core.kubeadmiral.io spec: @@ -13,58 +14,49 @@ spec: listKind: CollectedStatusList plural: collectedstatuses shortNames: - - cs + - cs singular: collectedstatus scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: CollectedStatus stores the collected fields of Kubernetes objects - from member clusters, that are propagated by a FederatedObject. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - clusters: - description: Clusters is the list of member clusters and collected fields - for its propagated Kubernetes object. - items: - description: CollectedFieldsWithCluster stores the collected fields - of a Kubernetes object in a member cluster. - properties: - cluster: - description: Cluster is the name of the member cluster. - type: string - collectedFields: - description: CollectedFields is the the set of fields collected - for the Kubernetes object. - x-kubernetes-preserve-unknown-fields: true - error: - description: Error records any errors encountered while collecting - fields from the cluster. - type: string - required: - - cluster - - collectedFields + - name: v1alpha1 + schema: + openAPIV3Schema: + description: CollectedStatus stores the collected fields of Kubernetes objects from member clusters, that are propagated by a FederatedObject. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + clusters: + description: Clusters is the list of member clusters and collected fields for its propagated Kubernetes object. + items: + description: CollectedFieldsWithCluster stores the collected fields of a Kubernetes object in a member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. 
+ type: string + collectedFields: + description: CollectedFields is the the set of fields collected for the Kubernetes object. + x-kubernetes-preserve-unknown-fields: true + error: + description: Error records any errors encountered while collecting fields from the cluster. + type: string + required: + - cluster + - collectedFields + type: object + type: array + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + lastUpdateTime: + description: LastUpdateTime is the last time that a collection was performed. + format: date-time + type: string + metadata: type: object - type: array - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - lastUpdateTime: - description: LastUpdateTime is the last time that a collection was performed. - format: date-time - type: string - metadata: - type: object - required: - - clusters - - lastUpdateTime - type: object - served: true - storage: true + required: + - clusters + - lastUpdateTime + type: object + served: true + storage: true diff --git a/config/crds/core.kubeadmiral.io_federatedclusters.yaml b/config/crds/core.kubeadmiral.io_federatedclusters.yaml index be926b74..4aba2edc 100644 --- a/config/crds/core.kubeadmiral.io_federatedclusters.yaml +++ b/config/crds/core.kubeadmiral.io_federatedclusters.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: federatedclusters.core.kubeadmiral.io spec: @@ -13,203 +14,180 @@ spec: listKind: FederatedClusterList plural: federatedclusters shortNames: - - fcluster + - fcluster singular: federatedcluster scope: Cluster versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: ready - type: string - - jsonPath: .status.conditions[?(@.type=='Joined')].status - name: joined - type: string - - jsonPath: .metadata.creationTimestamp - name: age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: FederatedCluster is the Schema for the federatedclusters API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: FederatedClusterSpec defines the desired state of FederatedCluster - properties: - apiEndpoint: - description: The API endpoint of the member cluster. 
This can be a - hostname, hostname:port, IP or IP:port. - type: string - insecure: - description: Access API endpoint with security. - type: boolean - secretRef: - description: Name of the secret containing the token required to access - the member cluster. The secret needs to exist in the fed system - namespace. - properties: - name: - description: Name of a secret within the enclosing namespace - type: string - required: - - name - type: object - taints: - description: If specified, the cluster's taints. - items: - description: The node this Taint is attached to has the "effect" - on any pod that does not tolerate the Taint. + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: ready + type: string + - jsonPath: .status.conditions[?(@.type=='Joined')].status + name: joined + type: string + - jsonPath: .metadata.creationTimestamp + name: age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: FederatedCluster is the Schema for the federatedclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FederatedClusterSpec defines the desired state of FederatedCluster + properties: + apiEndpoint: + description: The API endpoint of the member cluster. This can be a hostname, hostname:port, IP or IP:port. + type: string + insecure: + description: Access API endpoint with security. + type: boolean + secretRef: + description: Name of the secret containing the token required to access the member cluster. The secret needs to exist in the fed system namespace. properties: - effect: - description: Required. The effect of the taint on pods that - do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule - and NoExecute. - type: string - key: - description: Required. The taint key to be applied to a node. - type: string - timeAdded: - description: TimeAdded represents the time at which the taint - was added. It is only written for NoExecute taints. - format: date-time - type: string - value: - description: The taint value corresponding to the taint key. - type: string - required: - - effect - - key - type: object - type: array - useServiceAccount: - description: Whether to use service account token to authenticate - to the member cluster. - type: boolean - required: - - apiEndpoint - - secretRef - type: object - status: - description: FederatedClusterStatus defines the observed state of FederatedCluster - properties: - apiResourceTypes: - description: The list of api resource types defined in the federated - cluster - items: - description: APIResource represents a Kubernetes API resource. - properties: - group: - description: Group of the resource. - type: string - kind: - description: Kind of the resource. - type: string - pluralName: - description: Lower-cased plural name of the resource (e.g. configmaps). 
If - not provided, it will be computed by lower-casing the kind - and suffixing an 's'. - type: string - scope: - description: Scope of the resource. - type: string - version: - description: Version of the resource. + name: + description: Name of a secret within the enclosing namespace type: string required: - - kind - - pluralName - - scope - - version + - name type: object - type: array - conditions: - description: Conditions is an array of current cluster conditions. - items: - description: ClusterCondition describes current state of a cluster. - properties: - lastProbeTime: - description: Last time the condition was checked. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transit from one status - to another. - format: date-time - type: string - message: - description: Human readable message indicating details about - the current status. - type: string - reason: - description: Programmatic identifier indicating the reason for - the current status. - type: string - status: - description: Status of the condition, one of True, False, Unknown. - type: string - type: - description: Type of cluster condition, Ready or Offline. - type: string - required: - - lastProbeTime - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - joinPerformed: - description: Whether any effectual action was performed in the cluster - while joining. If true, clean-up is required on cluster removal - to undo the side-effects. - type: boolean - resources: - description: Resources describes the cluster's resources. - properties: - allocatable: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Allocatable represents the total resources that are - allocatable for scheduling. + taints: + description: If specified, the cluster's taints. + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + format: date-time + type: string + value: + description: The taint value corresponding to the taint key. + type: string + required: + - effect + - key + type: object + type: array + useServiceAccount: + description: Whether to use service account token to authenticate to the member cluster. + type: boolean + required: + - apiEndpoint + - secretRef + type: object + status: + description: FederatedClusterStatus defines the observed state of FederatedCluster + properties: + apiResourceTypes: + description: The list of api resource types defined in the federated cluster + items: + description: APIResource represents a Kubernetes API resource. + properties: + group: + description: Group of the resource. + type: string + kind: + description: Kind of the resource. + type: string + pluralName: + description: Lower-cased plural name of the resource (e.g. configmaps). 
If not provided, it will be computed by lower-casing the kind and suffixing an 's'.
+                        type: string
+                      scope:
+                        description: Scope of the resource.
+                        type: string
+                      version:
+                        description: Version of the resource.
+                        type: string
+                    required:
+                      - kind
+                      - pluralName
+                      - scope
+                      - version
+                    type: object
+                  type: array
+                conditions:
+                  description: Conditions is an array of current cluster conditions.
+                  items:
+                    description: ClusterCondition describes current state of a cluster.
+                    properties:
+                      lastProbeTime:
+                        description: Last time the condition was checked.
+                        format: date-time
+                        type: string
+                      lastTransitionTime:
+                        description: Last time the condition transitioned from one status to another.
+                        format: date-time
+                        type: string
+                      message:
+                        description: Human readable message indicating details about the current status.
+                        type: string
+                      reason:
+                        description: Programmatic identifier indicating the reason for the current status.
+                        type: string
+                      status:
+                        description: Status of the condition, one of True, False, Unknown.
+                        type: string
+                      type:
+                        description: Type of cluster condition, Ready or Offline.
+                        type: string
+                    required:
+                      - lastProbeTime
+                      - lastTransitionTime
+                      - message
+                      - reason
+                      - status
+                      - type
+                    type: object
+                  type: array
+                  x-kubernetes-list-map-keys:
+                    - type
+                  x-kubernetes-list-type: map
+                joinPerformed:
+                  description: Whether any effectual action was performed in the cluster while joining. If true, clean-up is required on cluster removal to undo the side-effects.
+                  type: boolean
+                resources:
+                  description: Resources describes the cluster's resources.
+                  properties:
+                    allocatable:
+                      additionalProperties:
+                        anyOf:
+                          - type: integer
+                          - type: string
+                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                        x-kubernetes-int-or-string: true
+                      description: Allocatable represents the total resources that are allocatable for scheduling.
+                      type: object
+                    available:
+                      additionalProperties:
+                        anyOf:
+                          - type: integer
+                          - type: string
+                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                        x-kubernetes-int-or-string: true
+                      description: Available represents the resources currently available for scheduling.
+                      type: object
+                    schedulableNodes:
+                      description: SchedulableNodes represents the number of nodes which are ready and schedulable.
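#
# Illustrative only (not part of the generated schema): a minimal FederatedCluster
# satisfying the spec schema above, whose required fields are apiEndpoint and
# secretRef. The endpoint, secret, and cluster names below are hypothetical.
#
#   apiVersion: core.kubeadmiral.io/v1alpha1
#   kind: FederatedCluster
#   metadata:
#     name: member-1
#   spec:
#     apiEndpoint: "https://member-1.example.com:6443"
#     useServiceAccount: true
#     secretRef:
#       name: member-1-token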
+ format: int64 + type: integer + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_federatedobjects.yaml b/config/crds/core.kubeadmiral.io_federatedobjects.yaml index aa8e9cef..33f803e4 100644 --- a/config/crds/core.kubeadmiral.io_federatedobjects.yaml +++ b/config/crds/core.kubeadmiral.io_federatedobjects.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: federatedobjects.core.kubeadmiral.io spec: @@ -13,213 +14,178 @@ spec: listKind: FederatedObjectList plural: federatedobjects shortNames: - - fo + - fo singular: federatedobject scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: FederatedObject describes a namespace-scoped Kubernetes object - and how it should be propagated to different member clusters. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the desired behavior of the FederatedObject. - properties: - follows: - description: Follows defines other objects, or "leaders", that the - Kubernetes object should follow during propagation, i.e. the Kubernetes - object should be propagated to all member clusters that its "leaders" - are placed in. - items: - description: LeaderReference contains the identifying metadata of - a "leader" Kubernetes object. - properties: - group: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - required: - - kind - - name - type: object - type: array - overrides: - description: Overrides describe the overrides that should be applied - to the base template of the Kubernetes object before it is propagated - to individual member clusters. - items: - description: OverrideWithController describes the overrides that - will be applied to a Kubernetes object before it is propagated - to individual member clusters. - properties: - clusters: - description: Override is the list of member clusters and their - respective override patches. - items: - description: ClusterReferenceWithPatches represents a single - member cluster and a list of override patches for the cluster. - properties: - cluster: - description: Cluster is the name of the member cluster. - type: string - patches: - description: Patches is the list of override patches for - the member cluster. - items: - description: OverridePatch defines a JSON patch. - properties: - op: - type: string - path: - type: string - value: - x-kubernetes-preserve-unknown-fields: true - required: - - path - type: object - type: array - required: - - cluster - type: object - type: array - controller: - description: Controller identifies the controller responsible - for this override. 
- type: string - required: - - clusters - - controller - type: object - type: array - placements: - description: Placements describe the member clusters that the Kubernetes - object will be propagated to, which is a union of all the listed - clusters. - items: - description: PlacementWithController describes the member clusters - that a Kubernetes object should be propagated to. - properties: - controller: - description: Controller identifies the controller responsible - for this placement. - type: string - placement: - description: Placement is the list of member clusters that the - Kubernetes object should be propagated to. - items: - description: ClusterReference represents a single member cluster. - properties: - cluster: - description: Cluster is the name of the member cluster. - type: string - required: - - cluster - type: object - type: array - required: - - controller - - placement - type: object - type: array - template: - description: Template is the base template of the Kubernetes object - to be propagated. - x-kubernetes-preserve-unknown-fields: true - required: - - template - type: object - status: - description: Status describes the most recently observed status of the - FederatedObject. - properties: - clusters: - description: Clusters contains the propagation status of the Kubernetes - object for individual member clusters. - items: - description: PropagationStatus describes the propagation of a Kubernetes - object to a given member cluster. - properties: - cluster: - description: Cluster is the name of the member cluster. - type: string - lastObservedGeneration: - description: LastObservedGeneration is the last observed generation - of the Kubernetes object in the member cluster. - format: int64 - type: integer - status: - description: Status describes the current status of propagating - the Kubernetes object to the member cluster. - type: string - required: - - cluster - - status - type: object - type: array - collisionCount: - description: CollisionCount can be used in conjunction with RevisionHistory - to implement rollbacks. - format: int32 - type: integer - conditions: - description: Conditions describe the current state of this FederatedObject. - items: - description: GenericFederatedObjectCondition contains the current - details about a particular condition of a FederatedObject. - properties: - lastTransitionTime: - description: LastTransitionTime is the last time the status - of this condition changed. - format: date-time - type: string - lastUpdateTime: - description: LastUpdateTime is the last time a reconciliation - for this condition occurred. - format: date-time - type: string - reason: - description: Reason is the reason for the last status change - of this condition. - type: string - status: - description: Status is the status of the condition, one of True, - False or Unknown. - type: string - type: - description: Type is the type of the condition. - type: string - required: - - status - - type - type: object - type: array - syncedGeneration: - description: SyncedGeneration is the generation of this FederatedObject - when it was last synced to selected member clusters. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} + - name: v1alpha1 + schema: + openAPIV3Schema: + description: FederatedObject describes a namespace-scoped Kubernetes object and how it should be propagated to different member clusters. 
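#
# Illustrative only (not part of the generated schema): a sketch of a
# FederatedObject this schema would admit, assuming a Deployment source object.
# All names and the controller identifier below are hypothetical. Spec requires
# only `template`; each `placements` entry pairs a controller with a list of
# member clusters.
#
#   apiVersion: core.kubeadmiral.io/v1alpha1
#   kind: FederatedObject
#   metadata:
#     name: echo-server-deployments.apps
#     namespace: default
#   spec:
#     template:
#       apiVersion: apps/v1
#       kind: Deployment
#       metadata:
#         name: echo-server
#         namespace: default
#       # Deployment spec omitted for brevity
#     placements:
#       - controller: example.io/scheduler
#         placement:
#           - cluster: member-1
#           - cluster: member-2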
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired behavior of the FederatedObject. + properties: + follows: + description: Follows defines other objects, or "leaders", that the Kubernetes object should follow during propagation, i.e. the Kubernetes object should be propagated to all member clusters that its "leaders" are placed in. + items: + description: LeaderReference contains the identifying metadata of a "leader" Kubernetes object. + properties: + group: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + type: array + overrides: + description: Overrides describe the overrides that should be applied to the base template of the Kubernetes object before it is propagated to individual member clusters. + items: + description: OverrideWithController describes the overrides that will be applied to a Kubernetes object before it is propagated to individual member clusters. + properties: + clusters: + description: Override is the list of member clusters and their respective override patches. + items: + description: ClusterReferenceWithPatches represents a single member cluster and a list of override patches for the cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + patches: + description: Patches is the list of override patches for the member cluster. + items: + description: OverridePatch defines a JSON patch. + properties: + op: + type: string + path: + type: string + value: + x-kubernetes-preserve-unknown-fields: true + required: + - path + type: object + type: array + required: + - cluster + type: object + type: array + controller: + description: Controller identifies the controller responsible for this override. + type: string + required: + - clusters + - controller + type: object + type: array + placements: + description: Placements describe the member clusters that the Kubernetes object will be propagated to, which is a union of all the listed clusters. + items: + description: PlacementWithController describes the member clusters that a Kubernetes object should be propagated to. + properties: + controller: + description: Controller identifies the controller responsible for this placement. + type: string + placement: + description: Placement is the list of member clusters that the Kubernetes object should be propagated to. + items: + description: ClusterReference represents a single member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + required: + - cluster + type: object + type: array + required: + - controller + - placement + type: object + type: array + template: + description: Template is the base template of the Kubernetes object to be propagated. 
+ x-kubernetes-preserve-unknown-fields: true + required: + - template + type: object + status: + description: Status describes the most recently observed status of the FederatedObject. + properties: + clusters: + description: Clusters contains the propagation status of the Kubernetes object for individual member clusters. + items: + description: PropagationStatus describes the propagation of a Kubernetes object to a given member cluster. + properties: + cluster: + description: Cluster is the name of the member cluster. + type: string + lastObservedGeneration: + description: LastObservedGeneration is the last observed generation of the Kubernetes object in the member cluster. + format: int64 + type: integer + status: + description: Status describes the current status of propagating the Kubernetes object to the member cluster. + type: string + required: + - cluster + - status + type: object + type: array + collisionCount: + description: CollisionCount can be used in conjunction with RevisionHistory to implement rollbacks. + format: int32 + type: integer + conditions: + description: Conditions describe the current state of this FederatedObject. + items: + description: GenericFederatedObjectCondition contains the current details about a particular condition of a FederatedObject. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the status of this condition changed. + format: date-time + type: string + lastUpdateTime: + description: LastUpdateTime is the last time a reconciliation for this condition occurred. + format: date-time + type: string + reason: + description: Reason is the reason for the last status change of this condition. + type: string + status: + description: Status is the status of the condition, one of True, False or Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + required: + - status + - type + type: object + type: array + syncedGeneration: + description: SyncedGeneration is the generation of this FederatedObject when it was last synced to selected member clusters. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml b/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml index c38a577f..14d563ac 100644 --- a/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml +++ b/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: federatedtypeconfigs.core.kubeadmiral.io spec: @@ -13,163 +14,132 @@ spec: listKind: FederatedTypeConfigList plural: federatedtypeconfigs shortNames: - - ftc + - ftc singular: federatedtypeconfig scope: Cluster versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: FederatedTypeConfig specifies an API resource type to federate - and various type-specific options. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - autoMigration: - description: Configuration for AutoMigration. If left empty, the AutoMigration - feature will be disabled. - properties: - enabled: - description: Whether or not to automatically migrate unschedulable - pods to a different cluster. - type: boolean - required: - - enabled - type: object - controllers: - description: The controllers that must run before the source object - can be propagated to member clusters. Each inner slice specifies - a step. Step T must complete before step T+1 can commence. Controllers - within each step can execute in parallel. - items: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: FederatedTypeConfig specifies an API resource type to federate and various type-specific options. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + autoMigration: + description: Configuration for AutoMigration. If left empty, the AutoMigration feature will be disabled. + properties: + enabled: + description: Whether or not to automatically migrate unschedulable pods to a different cluster. + type: boolean + required: + - enabled + type: object + controllers: + description: The controllers that must run before the source object can be propagated to member clusters. Each inner slice specifies a step. Step T must complete before step T+1 can commence. Controllers within each step can execute in parallel. items: - type: string - type: array - type: array - pathDefinition: - description: Defines the paths to various fields in the target object's - schema. - properties: - availableReplicasStatus: - description: Path to a numeric field that reflects the number - of available replicas that the object currently has. E.g. `status.availableReplicas` - for Deployment and ReplicaSet. - type: string - labelSelector: - description: Path to a metav1.LabelSelector field that selects - the replicas for this object. E.g. `spec.selector` for Deployment - and ReplicaSet. - type: string - readyReplicasStatus: - description: Path to a numeric field that reflects the number - of ready replicas that the object currently has. E.g. `status.readyReplicas` - for Deployment and ReplicaSet. - type: string - replicasSpec: - description: Path to a numeric field that indicates the number - of replicas that an object can be divided into. E.g. `spec.replicas` - for Deployment and ReplicaSet. 
- type: string - replicasStatus: - description: Path to a numeric field that reflects the number - of replicas that the object currently has. E.g. `status.replicas` - for Deployment and ReplicaSet. - type: string - type: object - revisionHistory: - description: Configuration for RevisionHistory. If left empty, the - RevisionHistory feature will be disabled. - properties: - enabled: - description: Whether or not preserve a RevisionHistory for the - federated object during updates. - type: boolean - required: - - enabled - type: object - rolloutPlan: - description: Configuration for RolloutPlan. If left empty, the RolloutPlan - feature will be disabled. - properties: - enabled: - description: Whether or not to synchronize the rollout process - across clusters. - type: boolean - required: - - enabled - type: object - sourceType: - description: The API resource type to be federated. - properties: - group: - description: Group of the resource. - type: string - kind: - description: Kind of the resource. - type: string - pluralName: - description: Lower-cased plural name of the resource (e.g. configmaps). If - not provided, it will be computed by lower-casing the kind and - suffixing an 's'. - type: string - scope: - description: Scope of the resource. - type: string - version: - description: Version of the resource. - type: string - required: - - kind - - pluralName - - scope - - version - type: object - statusAggregation: - description: Configuration for StatusAggregation. If left empty, the - StatusAggregation feature will be disabled. - properties: - enabled: - description: Whether or not to enable status aggregation. - type: boolean - required: - - enabled - type: object - statusCollection: - description: Configuration for StatusCollection. If left empty, the - StatusCollection feature will be disabled. - properties: - enabled: - description: Whether or not to enable status collection. - type: boolean - fields: - description: Contains the fields to be collected during status - collection. Each field is a dot separated string that corresponds - to its path in the source object's schema. E.g. `metadata.creationTimestamp`. items: type: string type: array - required: - - enabled - type: object - required: - - sourceType - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} + type: array + pathDefinition: + description: Defines the paths to various fields in the target object's schema. + properties: + availableReplicasStatus: + description: Path to a numeric field that reflects the number of available replicas that the object currently has. E.g. `status.availableReplicas` for Deployment and ReplicaSet. + type: string + labelSelector: + description: Path to a metav1.LabelSelector field that selects the replicas for this object. E.g. `spec.selector` for Deployment and ReplicaSet. + type: string + readyReplicasStatus: + description: Path to a numeric field that reflects the number of ready replicas that the object currently has. E.g. `status.readyReplicas` for Deployment and ReplicaSet. + type: string + replicasSpec: + description: Path to a numeric field that indicates the number of replicas that an object can be divided into. E.g. `spec.replicas` for Deployment and ReplicaSet. + type: string + replicasStatus: + description: Path to a numeric field that reflects the number of replicas that the object currently has. E.g. `status.replicas` for Deployment and ReplicaSet. 
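#
# Illustrative only: how pathDefinition could be filled in for Deployment,
# using exactly the example paths cited in the field descriptions above.
#
#   pathDefinition:
#     replicasSpec: spec.replicas
#     labelSelector: spec.selector
#     replicasStatus: status.replicas
#     readyReplicasStatus: status.readyReplicas
#     availableReplicasStatus: status.availableReplicas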
+                      type: string
+                  type: object
+                revisionHistory:
+                  description: Configuration for RevisionHistory. If left empty, the RevisionHistory feature will be disabled.
+                  properties:
+                    enabled:
+                      description: Whether or not to preserve a RevisionHistory for the federated object during updates.
+                      type: boolean
+                  required:
+                    - enabled
+                  type: object
+                rolloutPlan:
+                  description: Configuration for RolloutPlan. If left empty, the RolloutPlan feature will be disabled.
+                  properties:
+                    enabled:
+                      description: Whether or not to synchronize the rollout process across clusters.
+                      type: boolean
+                  required:
+                    - enabled
+                  type: object
+                sourceType:
+                  description: The API resource type to be federated.
+                  properties:
+                    group:
+                      description: Group of the resource.
+                      type: string
+                    kind:
+                      description: Kind of the resource.
+                      type: string
+                    pluralName:
+                      description: Lower-cased plural name of the resource (e.g. configmaps). If not provided, it will be computed by lower-casing the kind and suffixing an 's'.
+                      type: string
+                    scope:
+                      description: Scope of the resource.
+                      type: string
+                    version:
+                      description: Version of the resource.
+                      type: string
+                  required:
+                    - kind
+                    - pluralName
+                    - scope
+                    - version
+                  type: object
+                statusAggregation:
+                  description: Configuration for StatusAggregation. If left empty, the StatusAggregation feature will be disabled.
+                  properties:
+                    enabled:
+                      description: Whether or not to enable status aggregation.
+                      type: boolean
+                  required:
+                    - enabled
+                  type: object
+                statusCollection:
+                  description: Configuration for StatusCollection. If left empty, the StatusCollection feature will be disabled.
+                  properties:
+                    enabled:
+                      description: Whether or not to enable status collection.
+                      type: boolean
+                    fields:
+                      description: Contains the fields to be collected during status collection. Each field is a dot-separated string that corresponds to its path in the source object's schema. E.g. `metadata.creationTimestamp`.
+                      items:
+                        type: string
+                      type: array
+                  required:
+                    - enabled
+                  type: object
+              required:
+                - sourceType
+              type: object
+          required:
+            - spec
+          type: object
+      served: true
+      storage: true
+      subresources:
+        status: {}
diff --git a/config/crds/core.kubeadmiral.io_overridepolicies.yaml b/config/crds/core.kubeadmiral.io_overridepolicies.yaml
index c2727763..f15c6e41 100644
--- a/config/crds/core.kubeadmiral.io_overridepolicies.yaml
+++ b/config/crds/core.kubeadmiral.io_overridepolicies.yaml
@@ -4,6 +4,7 @@ kind: CustomResourceDefinition
 metadata:
   annotations:
     controller-gen.kubebuilder.io/version: v0.11.1
+    kubeadmiral.io/no-federated-resource: "true"
   creationTimestamp: null
   name: overridepolicies.core.kubeadmiral.io
 spec:
@@ -13,187 +14,154 @@ spec:
     listKind: OverridePolicyList
     plural: overridepolicies
     shortNames:
-    - op
+      - op
     singular: overridepolicy
   scope: Namespaced
   versions:
-  - name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: OverridePolicy describes the override rules for a resource.
-        properties:
-          apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
-            type: string
-          kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - overrideRules: - description: OverrideRules specify the override rules. Each rule specifies - the overriders and the clusters these overriders should be applied - to. - items: - properties: - overriders: - description: Overriders specify the overriders to be applied - in the target clusters. - properties: - jsonpatch: - description: JsonPatch specifies overriders in a syntax - similar to RFC6902 JSON Patch. - items: - properties: - operator: - description: Operator specifies the operation. If - omitted, defaults to "replace". - type: string - path: - description: Path is a JSON pointer (RFC 6901) specifying - the location within the resource document where - the operation is performed. Each key in the path - should be prefixed with "/", while "~" and "/" should - be escaped as "~0" and "~1" respectively. For example, - to add a label "kubeadmiral.io/label", the path - should be "/metadata/labels/kubeadmiral.io~1label". - type: string - value: - description: Value is the value(s) required by the - operation. - x-kubernetes-preserve-unknown-fields: true - required: - - path - type: object - type: array - type: object - targetClusters: - description: TargetClusters selects the clusters in which the - overriders in this rule should be applied. If multiple types - of selectors are specified, the overall result is the intersection - of all of them. - properties: - clusterAffinity: - description: ClusterAffinity selects FederatedClusters by - matching their labels and fields against expressions. - If multiple terms are specified, their results are ORed. - items: - properties: - matchExpressions: - description: A list of cluster selector requirements - by cluster labels. - items: - description: ClusterSelectorRequirement is a selector - that contains values, a key, and an operator that - relates the values and keys - properties: - key: - type: string - operator: - description: ClusterSelectorOperator is the - set of operators that can be used in a cluster - selector requirement. - enum: - - In - - NotIn - - Exists - - DoesNotExist - - Gt - - Lt - type: string - values: - items: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: OverridePolicy describes the override rules for a resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + overrideRules: + description: OverrideRules specify the override rules. Each rule specifies the overriders and the clusters these overriders should be applied to. + items: + properties: + overriders: + description: Overriders specify the overriders to be applied in the target clusters. + properties: + jsonpatch: + description: JsonPatch specifies overriders in a syntax similar to RFC6902 JSON Patch. 
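#
# Illustrative only: an overrideRules entry using the jsonpatch syntax described
# above. The cluster name and label value are hypothetical; the path escapes "/"
# as "~1" per the RFC 6901 convention noted in the Path description that follows.
#
#   overrideRules:
#     - targetClusters:
#         clusters:
#           - member-1
#       overriders:
#         jsonpatch:
#           - operator: add
#             path: /metadata/labels/kubeadmiral.io~1label
#             value: enabled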
+ items: + properties: + operator: + description: Operator specifies the operation. If omitted, defaults to "replace". + type: string + path: + description: Path is a JSON pointer (RFC 6901) specifying the location within the resource document where the operation is performed. Each key in the path should be prefixed with "/", while "~" and "/" should be escaped as "~0" and "~1" respectively. For example, to add a label "kubeadmiral.io/label", the path should be "/metadata/labels/kubeadmiral.io~1label". + type: string + value: + description: Value is the value(s) required by the operation. + x-kubernetes-preserve-unknown-fields: true + required: + - path + type: object + type: array + type: object + targetClusters: + description: TargetClusters selects the clusters in which the overriders in this rule should be applied. If multiple types of selectors are specified, the overall result is the intersection of all of them. + properties: + clusterAffinity: + description: ClusterAffinity selects FederatedClusters by matching their labels and fields against expressions. If multiple terms are specified, their results are ORed. + items: + properties: + matchExpressions: + description: A list of cluster selector requirements by cluster labels. + items: + description: ClusterSelectorRequirement is a selector that contains values, a key, and an operator that relates the values and keys + properties: + key: + type: string + operator: + description: ClusterSelectorOperator is the set of operators that can be used in a cluster selector requirement. + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt + type: string + values: + items: + type: string + type: array + required: + - key + - operator + - values + type: object + type: array + matchFields: + description: A list of cluster selector requirements by cluster fields. + items: + description: ClusterSelectorRequirement is a selector that contains values, a key, and an operator that relates the values and keys + properties: + key: type: string - type: array - required: - - key - - operator - - values - type: object - type: array - matchFields: - description: A list of cluster selector requirements - by cluster fields. - items: - description: ClusterSelectorRequirement is a selector - that contains values, a key, and an operator that - relates the values and keys - properties: - key: - type: string - operator: - description: ClusterSelectorOperator is the - set of operators that can be used in a cluster - selector requirement. - enum: - - In - - NotIn - - Exists - - DoesNotExist - - Gt - - Lt - type: string - values: - items: + operator: + description: ClusterSelectorOperator is the set of operators that can be used in a cluster selector requirement. + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt type: string - type: array - required: - - key - - operator - - values - type: object - type: array + values: + items: + type: string + type: array + required: + - key + - operator + - values + type: object + type: array + type: object + type: array + clusterSelector: + additionalProperties: + type: string + description: ClusterSelector selects FederatedClusters by their labels. Empty labels selects all FederatedClusters. type: object - type: array - clusterSelector: - additionalProperties: - type: string - description: ClusterSelector selects FederatedClusters by - their labels. Empty labels selects all FederatedClusters. - type: object - clusters: - description: Clusters selects FederatedClusters by their - names. 
Empty Clusters selects all FederatedClusters. - items: - type: string - type: array - type: object - type: object - type: array - type: object - status: - properties: - refCount: - format: int64 - minimum: 0 - type: integer - typedRefCount: - items: - properties: - count: - format: int64 - minimum: 0 - type: integer - group: - type: string - resource: - type: string - required: - - count - - resource - type: object - type: array - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} + clusters: + description: Clusters selects FederatedClusters by their names. Empty Clusters selects all FederatedClusters. + items: + type: string + type: array + type: object + type: object + type: array + type: object + status: + properties: + refCount: + format: int64 + minimum: 0 + type: integer + typedRefCount: + items: + properties: + count: + format: int64 + minimum: 0 + type: integer + group: + type: string + resource: + type: string + required: + - count + - resource + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_propagatedversions.yaml b/config/crds/core.kubeadmiral.io_propagatedversions.yaml index d18bbd69..52940220 100644 --- a/config/crds/core.kubeadmiral.io_propagatedversions.yaml +++ b/config/crds/core.kubeadmiral.io_propagatedversions.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: propagatedversions.core.kubeadmiral.io spec: @@ -15,61 +16,49 @@ spec: singular: propagatedversion scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: PropagatedVersion holds version information about the state propagated - from KubeFed APIs (configured by FederatedTypeConfig resources) to member - clusters. The name of a PropagatedVersion encodes the kind and name of the - resource it stores information for (i.e. -). - If a target resource has a populated metadata.Generation field, the generation - will be stored with a prefix of `gen:` as the version for the cluster. If - metadata.Generation is not available, metadata.ResourceVersion will be stored - with a prefix of `rv:` as the version for the cluster. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - status: - description: PropagatedVersionStatus defines the observed state of PropagatedVersion - properties: - clusterVersions: - description: The last versions produced in each cluster for this resource. - items: - properties: - clusterName: - description: The name of the cluster the version is for. - type: string - version: - description: The last version produced for the resource by a - KubeFed operation. 
- type: string - required: - - clusterName - - version - type: object - type: array - overridesVersion: - description: The observed version of the overrides for this resource. - type: string - templateVersion: - description: The observed version of the template for this resource. - type: string - required: - - overridesVersion - - templateVersion - type: object - type: object - served: true - storage: true - subresources: - status: {} + - name: v1alpha1 + schema: + openAPIV3Schema: + description: PropagatedVersion holds version information about the state propagated from KubeFed APIs (configured by FederatedTypeConfig resources) to member clusters. The name of a PropagatedVersion encodes the kind and name of the resource it stores information for (i.e. -). If a target resource has a populated metadata.Generation field, the generation will be stored with a prefix of `gen:` as the version for the cluster. If metadata.Generation is not available, metadata.ResourceVersion will be stored with a prefix of `rv:` as the version for the cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + status: + description: PropagatedVersionStatus defines the observed state of PropagatedVersion + properties: + clusterVersions: + description: The last versions produced in each cluster for this resource. + items: + properties: + clusterName: + description: The name of the cluster the version is for. + type: string + version: + description: The last version produced for the resource by a KubeFed operation. + type: string + required: + - clusterName + - version + type: object + type: array + overridesVersion: + description: The observed version of the overrides for this resource. + type: string + templateVersion: + description: The observed version of the template for this resource. 
+ type: string + required: + - overridesVersion + - templateVersion + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crds/core.kubeadmiral.io_propagationpolicies.yaml b/config/crds/core.kubeadmiral.io_propagationpolicies.yaml index 780b1563..424b87d8 100644 --- a/config/crds/core.kubeadmiral.io_propagationpolicies.yaml +++ b/config/crds/core.kubeadmiral.io_propagationpolicies.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: propagationpolicies.core.kubeadmiral.io spec: diff --git a/config/crds/core.kubeadmiral.io_schedulerpluginwebhookconfigurations.yaml b/config/crds/core.kubeadmiral.io_schedulerpluginwebhookconfigurations.yaml index 2f77a84e..f0c47f7a 100644 --- a/config/crds/core.kubeadmiral.io_schedulerpluginwebhookconfigurations.yaml +++ b/config/crds/core.kubeadmiral.io_schedulerpluginwebhookconfigurations.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: schedulerpluginwebhookconfigurations.core.kubeadmiral.io spec: @@ -15,96 +16,72 @@ spec: singular: schedulerpluginwebhookconfiguration scope: Cluster versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: SchedulerPluginWebhookConfiguration is a webhook that can be - used as a scheduler plugin. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - filterPath: - description: Path for the filter call, empty if not supported. This - path is appended to the URLPrefix when issuing the filter call to - webhook. - type: string - httpTimeout: - default: 5s - description: HTTPTimeout specifies the timeout duration for a call - to the webhook. Timeout fails the scheduling of the workload. Defaults - to 5 seconds. - format: duration - type: string - payloadVersions: - description: PayloadVersions is an ordered list of preferred request - and response versions the webhook expects. The scheduler will try - to use the first version in the list which it supports. If none - of the versions specified in this list supported by the scheduler, - scheduling will fail for this object. - items: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: SchedulerPluginWebhookConfiguration is a webhook that can be used as a scheduler plugin. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+              type: string
+            kind:
+              description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+              type: string
+            metadata:
+              type: object
+            spec:
+              properties:
+                filterPath:
+                  description: Path for the filter call, empty if not supported. This path is appended to the URLPrefix when issuing the filter call to the webhook.
+                  type: string
+                httpTimeout:
+                  default: 5s
+                  description: HTTPTimeout specifies the timeout duration for a call to the webhook. Timeout fails the scheduling of the workload. Defaults to 5 seconds.
+                  format: duration
+                  type: string
+                payloadVersions:
+                  description: PayloadVersions is an ordered list of preferred request and response versions the webhook expects. The scheduler will try to use the first version in the list which it supports. If none of the versions specified in this list is supported by the scheduler, scheduling will fail for this object.
+                  items:
+                    type: string
+                  minItems: 1
+                  type: array
+                scorePath:
+                  description: Path for the score call, empty if not supported. This verb is appended to the URLPrefix when issuing the score call to the webhook.
+                  type: string
+                selectPath:
+                  description: Path for the select call, empty if not supported. This verb is appended to the URLPrefix when issuing the select call to the webhook.
+                  type: string
+                tlsConfig:
+                  description: TLSConfig specifies the transport layer security config.
+                  properties:
+                    caData:
+                      description: CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+                      format: byte
+                      type: string
+                    certData:
+                      description: CertData holds PEM-encoded bytes (typically read from a client certificate file).
+                      format: byte
+                      type: string
+                    insecure:
+                      description: Server should be accessed without verifying the TLS certificate.
For testing only. + type: boolean + keyData: + description: KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + format: byte + type: string + serverName: + description: ServerName is passed to the server for SNI and is used in the client to check server certificates against. If ServerName is empty, the hostname used to contact the server is used. + type: string + type: object + urlPrefix: + description: URLPrefix at which the webhook is available + type: string + required: + - payloadVersions + - urlPrefix + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/config/crds/core.kubeadmiral.io_schedulingprofiles.yaml b/config/crds/core.kubeadmiral.io_schedulingprofiles.yaml index db2e1a5e..1ac860d9 100644 --- a/config/crds/core.kubeadmiral.io_schedulingprofiles.yaml +++ b/config/crds/core.kubeadmiral.io_schedulingprofiles.yaml @@ -4,6 +4,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.1 + kubeadmiral.io/no-federated-resource: "true" creationTimestamp: null name: schedulingprofiles.core.kubeadmiral.io spec: @@ -13,234 +14,179 @@ spec: listKind: SchedulingProfileList plural: schedulingprofiles shortNames: - - sp + - sp singular: schedulingprofile scope: Cluster versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: SchedulingProfile configures the plugins to use when scheduling - a resource - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - pluginConfig: - description: PluginConfig is an optional set of custom plugin arguments - for each plugin. Omitting config args for a plugin is equivalent - to using the default config for that plugin. - items: - description: PluginConfig specifies arguments that should be passed - to a plugin at the time of initialization. A plugin that is invoked - at multiple extension points is initialized once. Args can have - arbitrary structure. It is up to the plugin to process these Args. - properties: - args: - description: Args defines the arguments passed to the plugins - at the time of initialization. Args can have arbitrary structure. - x-kubernetes-preserve-unknown-fields: true - name: - description: Name defines the name of plugin being configured. - type: string - required: - - name - type: object - type: array - plugins: - description: Plugins specify the set of plugins that should be enabled - or disabled. Enabled plugins are the ones that should be enabled - in addition to the default plugins. Disabled plugins are any of - the default plugins that should be disabled. When no enabled or - disabled plugin is specified for an extension point, default plugins - for that extension point will be used if there is any. - properties: - filter: - description: Filter is the list of plugins that should be invoked - during the filter phase. 
- properties: - disabled: - description: Disabled specifies default plugins that should - be disabled. - items: - description: Plugin specifies a plugin type, name and its - weight when applicable. Weight is used only for Score - plugins. - properties: - name: - description: Name defines the name of the plugin. - type: string - type: - description: Type defines the type of the plugin. Type - should be omitted when referencing in-tree plugins. - enum: - - Webhook - type: string - wait: - description: Weight defines the weight of the plugin. - format: int64 - minimum: 0 - type: integer - type: object - type: array - enabled: - description: Enabled specifies plugins that should be enabled - in addition to the default plugins. Enabled plugins are - called in the order specified here, after default plugins. - If they need to be invoked before default plugins, default - plugins must be disabled and re-enabled here in desired - order. - items: - description: Plugin specifies a plugin type, name and its - weight when applicable. Weight is used only for Score - plugins. - properties: - name: - description: Name defines the name of the plugin. - type: string - type: - description: Type defines the type of the plugin. Type - should be omitted when referencing in-tree plugins. - enum: - - Webhook - type: string - wait: - description: Weight defines the weight of the plugin. - format: int64 - minimum: 0 - type: integer - type: object - type: array - type: object - score: - description: Score is the list of plugins that should be invoked - during the score phase. + - name: v1alpha1 + schema: + openAPIV3Schema: + description: SchedulingProfile configures the plugins to use when scheduling a resource + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + pluginConfig: + description: PluginConfig is an optional set of custom plugin arguments for each plugin. Omitting config args for a plugin is equivalent to using the default config for that plugin. + items: + description: PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. It is up to the plugin to process these Args. properties: - disabled: - description: Disabled specifies default plugins that should - be disabled. - items: - description: Plugin specifies a plugin type, name and its - weight when applicable. Weight is used only for Score - plugins. - properties: - name: - description: Name defines the name of the plugin. - type: string - type: - description: Type defines the type of the plugin. Type - should be omitted when referencing in-tree plugins. - enum: - - Webhook - type: string - wait: - description: Weight defines the weight of the plugin. 
- format: int64 - minimum: 0 - type: integer - type: object - type: array - enabled: - description: Enabled specifies plugins that should be enabled - in addition to the default plugins. Enabled plugins are - called in the order specified here, after default plugins. - If they need to be invoked before default plugins, default - plugins must be disabled and re-enabled here in desired - order. - items: - description: Plugin specifies a plugin type, name and its - weight when applicable. Weight is used only for Score - plugins. - properties: - name: - description: Name defines the name of the plugin. - type: string - type: - description: Type defines the type of the plugin. Type - should be omitted when referencing in-tree plugins. - enum: - - Webhook - type: string - wait: - description: Weight defines the weight of the plugin. - format: int64 - minimum: 0 - type: integer - type: object - type: array + args: + description: Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. + x-kubernetes-preserve-unknown-fields: true + name: + description: Name defines the name of plugin being configured. + type: string + required: + - name type: object - select: - description: Select is the list of plugins that should be invoked - during the select phase. - properties: - disabled: - description: Disabled specifies default plugins that should - be disabled. - items: - description: Plugin specifies a plugin type, name and its - weight when applicable. Weight is used only for Score - plugins. - properties: - name: - description: Name defines the name of the plugin. - type: string - type: - description: Type defines the type of the plugin. Type - should be omitted when referencing in-tree plugins. - enum: - - Webhook - type: string - wait: - description: Weight defines the weight of the plugin. - format: int64 - minimum: 0 - type: integer - type: object - type: array - enabled: - description: Enabled specifies plugins that should be enabled - in addition to the default plugins. Enabled plugins are - called in the order specified here, after default plugins. - If they need to be invoked before default plugins, default - plugins must be disabled and re-enabled here in desired - order. - items: - description: Plugin specifies a plugin type, name and its - weight when applicable. Weight is used only for Score - plugins. - properties: - name: - description: Name defines the name of the plugin. - type: string - type: - description: Type defines the type of the plugin. Type - should be omitted when referencing in-tree plugins. - enum: - - Webhook - type: string - wait: - description: Weight defines the weight of the plugin. - format: int64 - minimum: 0 - type: integer - type: object - type: array - type: object - type: object - type: object - required: - - spec - type: object - served: true - storage: true + type: array + plugins: + description: Plugins specify the set of plugins that should be enabled or disabled. Enabled plugins are the ones that should be enabled in addition to the default plugins. Disabled plugins are any of the default plugins that should be disabled. When no enabled or disabled plugin is specified for an extension point, default plugins for that extension point will be used if there is any. + properties: + filter: + description: Filter is the list of plugins that should be invoked during the filter phase. + properties: + disabled: + description: Disabled specifies default plugins that should be disabled. 
+ items: + description: Plugin specifies a plugin type, name and its weight when applicable. Weight is used only for Score plugins. + properties: + name: + description: Name defines the name of the plugin. + type: string + type: + description: Type defines the type of the plugin. Type should be omitted when referencing in-tree plugins. + enum: + - Webhook + type: string + wait: + description: Weight defines the weight of the plugin. + format: int64 + minimum: 0 + type: integer + type: object + type: array + enabled: + description: Enabled specifies plugins that should be enabled in addition to the default plugins. Enabled plugins are called in the order specified here, after default plugins. If they need to be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. + items: + description: Plugin specifies a plugin type, name and its weight when applicable. Weight is used only for Score plugins. + properties: + name: + description: Name defines the name of the plugin. + type: string + type: + description: Type defines the type of the plugin. Type should be omitted when referencing in-tree plugins. + enum: + - Webhook + type: string + wait: + description: Weight defines the weight of the plugin. + format: int64 + minimum: 0 + type: integer + type: object + type: array + type: object + score: + description: Score is the list of plugins that should be invoked during the score phase. + properties: + disabled: + description: Disabled specifies default plugins that should be disabled. + items: + description: Plugin specifies a plugin type, name and its weight when applicable. Weight is used only for Score plugins. + properties: + name: + description: Name defines the name of the plugin. + type: string + type: + description: Type defines the type of the plugin. Type should be omitted when referencing in-tree plugins. + enum: + - Webhook + type: string + wait: + description: Weight defines the weight of the plugin. + format: int64 + minimum: 0 + type: integer + type: object + type: array + enabled: + description: Enabled specifies plugins that should be enabled in addition to the default plugins. Enabled plugins are called in the order specified here, after default plugins. If they need to be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. + items: + description: Plugin specifies a plugin type, name and its weight when applicable. Weight is used only for Score plugins. + properties: + name: + description: Name defines the name of the plugin. + type: string + type: + description: Type defines the type of the plugin. Type should be omitted when referencing in-tree plugins. + enum: + - Webhook + type: string + wait: + description: Weight defines the weight of the plugin. + format: int64 + minimum: 0 + type: integer + type: object + type: array + type: object + select: + description: Select is the list of plugins that should be invoked during the select phase. + properties: + disabled: + description: Disabled specifies default plugins that should be disabled. + items: + description: Plugin specifies a plugin type, name and its weight when applicable. Weight is used only for Score plugins. + properties: + name: + description: Name defines the name of the plugin. + type: string + type: + description: Type defines the type of the plugin. Type should be omitted when referencing in-tree plugins. + enum: + - Webhook + type: string + wait: + description: Weight defines the weight of the plugin. 
+                              format: int64
+                              minimum: 0
+                              type: integer
+                          type: object
+                        type: array
+                      enabled:
+                        description: Enabled specifies plugins that should be enabled in addition to the default plugins. Enabled plugins are called in the order specified here, after default plugins. If they need to be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order.
+                        items:
+                          description: Plugin specifies a plugin type, name and its weight when applicable. Weight is used only for Score plugins.
+                          properties:
+                            name:
+                              description: Name defines the name of the plugin.
+                              type: string
+                            type:
+                              description: Type defines the type of the plugin. Type should be omitted when referencing in-tree plugins.
+                              enum:
+                                - Webhook
+                              type: string
+                            wait:
+                              description: Weight defines the weight of the plugin.
+                              format: int64
+                              minimum: 0
+                              type: integer
+                          type: object
+                        type: array
+                    type: object
+                type: object
+            type: object
+        required:
+          - spec
+        type: object
+    served: true
+    storage: true
diff --git a/hack/generate-groups.sh b/hack/generate-groups.sh
index a11d2df2..7686f835 100644
--- a/hack/generate-groups.sh
+++ b/hack/generate-groups.sh
@@ -51,6 +51,8 @@ for group in "${groups[@]}"; do
   INPUT_DIRS+=("${INPUT_BASE}/${group}")
 done
 
+NO_FEDERATED_ANNOTATION="kubeadmiral.io/no-federated-resource"
+
 # generate code
 function codegen::join() {
   local IFS="$1"
@@ -61,7 +63,13 @@ function codegen::join() {
 # generate manifests
 echo "Generating manifests"
 ${GOBIN}/controller-gen crd paths=$(codegen::join ";" "${INPUT_DIRS[@]}") output:crd:artifacts:config=config/crds
-# apply CRD patches
+
+# patch CRDs with the no-federated-resource annotation
+for crd_file in config/crds/*.yaml; do
+  yq eval -i ".metadata.annotations[\"${NO_FEDERATED_ANNOTATION}\"] = \"true\"" "${crd_file}"
+done
+
+# apply other CRD patches
 for patch_file in config/crds/patches/*.sh; do
   if [[ $patch_file == *.src.sh ]]; then
     continue

From cc196f2bc2959e0641e5b5f47e708c3c1023dd93 Mon Sep 17 00:00:00 2001
From: "hawjia.lim"
Date: Thu, 20 Jul 2023 19:54:27 +0800
Subject: [PATCH 078/173] refactor(federate-controller): use ergonomic way to get from lister

---
 pkg/controllers/federate/controller.go | 17 +++--------------
 pkg/controllers/federate/util.go       |  7 +++++--
 2 files changed, 8 insertions(+), 16 deletions(-)

diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go
index f58cf80c..07867fbe 100644
--- a/pkg/controllers/federate/controller.go
+++ b/pkg/controllers/federate/controller.go
@@ -223,7 +223,7 @@ func (c *FederateController) HasSynced() bool {
 func (c *FederateController) reconcile(ctx context.Context, key workerKey) (status worker.Result) {
 	_ = c.metrics.Rate("federate.throughput", 1)
 	ctx, logger := logging.InjectLogger(ctx, c.logger)
-	ctx, logger = logging.InjectLoggerValues(ctx, "source-object", key.ObjectKey())
+	ctx, logger = logging.InjectLoggerValues(ctx, "source-object", key.QualifiedName().String())
 
 	startTime := time.Now()
 	logger.V(3).Info("Start reconcile")
@@ -268,7 +268,7 @@
 		}
 	}
 
-	sourceObject, err := getSourceObjectFromLister(lister, key)
+	sourceUns, err := lister.Get(key.QualifiedName().String())
 	if err != nil && apierrors.IsNotFound(err) {
 		logger.V(3).Info(fmt.Sprintf("No source object found, skip federating"))
 		return worker.StatusAllOK
 	}
@@ -277,7 +277,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat
 		logger.Error(err, "Failed to get source object from store")
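The switch to a bare lister.Get works for both namespaced and cluster-scoped source types because cache.GenericLister hands its argument straight to the underlying indexer, which keys namespaced objects as "namespace/name" and cluster-scoped objects as plain "name". A minimal sketch of the key construction, assuming common.QualifiedName is shaped roughly like this (the real type lives in pkg/controllers/common and may carry more methods):

package main

import "fmt"

// QualifiedName mirrors the assumed shape of common.QualifiedName: an
// optional namespace plus a name.
type QualifiedName struct {
	Namespace string
	Name      string
}

// String renders the indexer key: "name" for cluster-scoped objects and
// "namespace/name" for namespaced ones, matching client-go cache keys.
func (q QualifiedName) String() string {
	if q.Namespace == "" {
		return q.Name
	}
	return q.Namespace + "/" + q.Name
}

func main() {
	fmt.Println(QualifiedName{Name: "member-cluster-1"}.String())            // "member-cluster-1"
	fmt.Println(QualifiedName{Namespace: "default", Name: "nginx"}.String()) // "default/nginx"
}

Because the key format already encodes the scope, the earlier namespace branching in getSourceObjectFromLister becomes unnecessary, which is what the deletion below removes.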
return worker.StatusError } - sourceObject = sourceObject.DeepCopy() + sourceObject := sourceUns.(*unstructured.Unstructured).DeepCopy() fedObjectName := naming.GenerateFederatedObjectName(sourceObject.GetName(), ftc.Name) ctx, logger = logging.InjectLoggerValues(ctx, "federated-object", fedObjectName) @@ -555,14 +555,3 @@ func (c *FederateController) handleExistingFederatedObject( return true, nil } - -func getSourceObjectFromLister(lister cache.GenericLister, key workerKey) (*unstructured.Unstructured, error) { - var obj runtime.Object - var err error - if key.namespace == "" { - obj, err = lister.Get(key.name) - } else { - obj, err = lister.ByNamespace(key.namespace).Get(key.name) - } - return obj.(*unstructured.Unstructured), err -} diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 7596a202..96802afd 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -47,8 +47,11 @@ type workerKey struct { gvk schema.GroupVersionKind } -func (k workerKey) ObjectKey() string { - return fmt.Sprintf("%s/%s", k.namespace, k.name) +func (k workerKey) QualifiedName() common.QualifiedName { + return common.QualifiedName{ + Namespace: k.namespace, + Name: k.name, + } } func templateForSourceObject( From 2a057a1a84c44cf6a67b880ff337cab54c1f7403 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 21 Jul 2023 11:17:15 +0800 Subject: [PATCH 079/173] refactor(federate-controller): inject gvk into logger --- pkg/controllers/federate/controller.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 07867fbe..1147b546 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -223,7 +223,8 @@ func (c *FederateController) HasSynced() bool { func (c *FederateController) reconcile(ctx context.Context, key workerKey) (status worker.Result) { _ = c.metrics.Rate("federate.throughput", 1) ctx, logger := logging.InjectLogger(ctx, c.logger) - ctx, logger = logging.InjectLoggerValues(ctx, "source-object", key.QualifiedName().String()) + ctx, logger = logging.InjectLoggerValues(ctx, "source-object", key.QualifiedName().String(), "gvk", key.gvk) + startTime := time.Now() logger.V(3).Info("Start reconcile") @@ -232,9 +233,6 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat logger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcile") }() - sourceGVK := key.gvk - ctx, logger = logging.InjectLoggerValues(ctx, "gvk", sourceGVK) - ftc, exists := c.informerManager.GetResourceFTC(key.gvk) if !exists { // This could happen if: @@ -303,7 +301,7 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat ownedbySource := false for _, ref := range fedObject.GetOwnerReferences() { - if schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind) == sourceGVK && + if schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind) == key.gvk && sourceObject.GetName() == ref.Name { ownedbySource = true break From 322111a96500245036af41ccdd7aa7f9577a09dd Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 21 Jul 2023 12:28:21 +0800 Subject: [PATCH 080/173] fix(federate-controller): wait for cfo cache sync --- pkg/controllers/federate/controller.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 1147b546..7df1ca18 100644 --- 
a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -217,7 +217,8 @@ func (c *FederateController) Run(ctx context.Context) { } func (c *FederateController) HasSynced() bool { - return c.informerManager.HasSynced() && c.fedObjectInformer.Informer().HasSynced() + return c.informerManager.HasSynced() && c.fedObjectInformer.Informer().HasSynced() && + c.clusterFedObjectInformer.Informer().HasSynced() } func (c *FederateController) reconcile(ctx context.Context, key workerKey) (status worker.Result) { From 14ba643f4336579f63b29aae3a957e77e61b8bf8 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 21 Jul 2023 13:32:55 +0800 Subject: [PATCH 081/173] fix(federate-controller): add logs for cache sync --- pkg/controllers/federate/controller.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 7df1ca18..7964f5bf 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -105,8 +105,6 @@ func NewFederateController( clusterFedObjectInformer: clusterFedObjectInformer, fedClient: fedClient, dynamicClient: dynamicClient, - worker: nil, - eventRecorder: nil, metrics: metrics, logger: logger.WithValues("controller", FederateControllerName), } @@ -209,9 +207,12 @@ func (c *FederateController) Run(ctx context.Context) { defer logger.Info("Stopping controller") if !cache.WaitForNamedCacheSync(FederateControllerName, ctx.Done(), c.HasSynced) { + logger.Error(nil, "Timed out waiting for cache sync") return } + logger.Info("Caches are synced") + c.worker.Run(ctx) <-ctx.Done() } From 1abf3660473a3efa85faa91a1569145b7562abcc Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 21 Jul 2023 18:55:11 +0800 Subject: [PATCH 082/173] feat(informer-manager): add ftc update handlers --- pkg/util/informermanager/informermanager.go | 49 ++++- .../informermanager/informermanager_test.go | 200 ++++++++++++++++-- pkg/util/informermanager/interface.go | 8 + 3 files changed, 240 insertions(+), 17 deletions(-) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index f5afccc5..d54c49a9 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -25,6 +25,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" @@ -51,8 +52,10 @@ type informerManager struct { ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer eventHandlerGenerators []*EventHandlerGenerator + ftcUpdateHandlers []FTCUpdateHandler - gvkMapping *bijection.Bijection[string, schema.GroupVersionKind] + initialFTCs sets.Set[string] + gvkMapping *bijection.Bijection[string, schema.GroupVersionKind] lastObservedFTCs map[string]*fedcorev1a1.FederatedTypeConfig informers map[string]informers.GenericInformer @@ -71,10 +74,13 @@ func NewInformerManager( manager := &informerManager{ lock: sync.RWMutex{}, started: false, + shutdown: false, client: client, informerTweakListOptions: informerTweakListOptions, ftcInformer: ftcInformer, eventHandlerGenerators: []*EventHandlerGenerator{}, + ftcUpdateHandlers: []FTCUpdateHandler{}, + initialFTCs: sets.New[string](), gvkMapping: bijection.NewBijection[string, schema.GroupVersionKind](), lastObservedFTCs: 
map[string]*fedcorev1a1.FederatedTypeConfig{},
 		informers:                map[string]informers.GenericInformer{},
@@ -200,13 +206,18 @@ func (m *informerManager) processFTC(
 		ctx, cancel := context.WithCancel(ctx)
 		go informer.Informer().Run(ctx.Done())
 
-		m.lastObservedFTCs[ftcName] = ftc
 		m.informers[ftcName] = informer
 		m.informerCancelFuncs[ftcName] = cancel
 		m.eventHandlerRegistrations[ftcName] = map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}
 		m.lastAppliedFTCsCache[ftcName] = map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{}
 	}
 
+	for _, handler := range m.ftcUpdateHandlers {
+		handler(m.lastObservedFTCs[ftcName], ftc)
+	}
+	m.lastObservedFTCs[ftcName] = ftc
+	m.initialFTCs.Delete(ftcName)
+
 	if !informer.Informer().HasSynced() {
 		logger.V(3).Info("Informer for FederatedTypeConfig not synced, will not register event handlers yet")
 		return nil, true, 100 * time.Millisecond
@@ -265,11 +276,18 @@ func (m *informerManager) processFTCDeletionUnlocked(ctx context.Context, ftcNam
 
 	m.gvkMapping.DeleteT1(ftcName)
 
-	delete(m.lastObservedFTCs, ftcName)
 	delete(m.informers, ftcName)
 	delete(m.informerCancelFuncs, ftcName)
 	delete(m.eventHandlerRegistrations, ftcName)
 
+	lastObservedFTC := m.lastObservedFTCs[ftcName]
+	for _, handler := range m.ftcUpdateHandlers {
+		handler(lastObservedFTC, nil)
+	}
+
+	delete(m.lastObservedFTCs, ftcName)
+	m.initialFTCs.Delete(ftcName)
+
 	return nil
 }
 
@@ -285,6 +303,18 @@ func (m *informerManager) AddEventHandlerGenerator(generator *EventHandlerGenera
 	return nil
 }
 
+func (m *informerManager) AddFTCUpdateHandler(handler FTCUpdateHandler) error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	if m.started {
+		return fmt.Errorf("failed to add FTCUpdateHandler: InformerManager is already started")
+	}
+
+	m.ftcUpdateHandlers = append(m.ftcUpdateHandlers, handler)
+	return nil
+}
+
 func (m *informerManager) GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister {
 	return m.ftcInformer.Lister()
 }
@@ -326,7 +356,9 @@ func (m *informerManager) GetResourceFTC(gvk schema.GroupVersionKind) (*fedcorev
 }
 
 func (m *informerManager) HasSynced() bool {
-	return m.ftcInformer.Informer().HasSynced()
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+	return m.ftcInformer.Informer().HasSynced() && len(m.initialFTCs) == 0
 }
 
 func (m *informerManager) Start(ctx context.Context) {
@@ -344,11 +376,18 @@ func (m *informerManager) Start(ctx context.Context) {
 
 	m.started = true
 
-	if !cache.WaitForCacheSync(ctx.Done(), m.HasSynced) {
+	if !cache.WaitForCacheSync(ctx.Done(), m.ftcInformer.Informer().HasSynced) {
 		logger.Error(nil, "Failed to wait for InformerManager cache sync")
 		return
 	}
 
+	// Populate the initial snapshot of FTCs
+
+	ftcs := m.ftcInformer.Informer().GetStore().List()
+	for _, ftc := range ftcs {
+		m.initialFTCs.Insert(ftc.(*fedcorev1a1.FederatedTypeConfig).GetName())
+	}
+
 	go wait.UntilWithContext(ctx, m.worker, 0)
 
 	go func() {

diff --git a/pkg/util/informermanager/informermanager_test.go b/pkg/util/informermanager/informermanager_test.go
index 8e1cb36d..c8fb801c 100644
--- a/pkg/util/informermanager/informermanager_test.go
+++ b/pkg/util/informermanager/informermanager_test.go
@@ -55,14 +55,21 @@ func TestInformerManager(t *testing.T) {
 		defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC}
 		defaultObjs := []*unstructured.Unstructured{}
 		generators := []*EventHandlerGenerator{}
+		handlers := []FTCUpdateHandler{}
 
 		ctx, cancel := context.WithCancel(ctx)
-		manager, _, _ := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs,
defaultObjs, generators) + manager, _, _ := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators, handlers) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. Verify that the GVK mapping for each FTC is eventually available for _, ftc := range defaultFTCs { @@ -91,14 +98,28 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} defaultObjs := []*unstructured.Unstructured{} generators := []*EventHandlerGenerator{} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) - manager, _, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, _, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + handlers, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + ftc := daemonsetFTC gvk := ftc.GetSourceTypeGVK() @@ -133,14 +154,21 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} defaultObjs := []*unstructured.Unstructured{} generators := []*EventHandlerGenerator{} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) - manager, _, _ := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, _, _ := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators, handlers) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. 
Verify that the listers for each FTC is eventually available for _, ftc := range defaultFTCs { @@ -171,14 +199,28 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{} defaultObjs := []*unstructured.Unstructured{} generators := []*EventHandlerGenerator{} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) - manager, _, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, _, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + handlers, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + ftc := daemonsetFTC gvk := ftc.GetSourceTypeGVK() @@ -231,6 +273,7 @@ func TestInformerManager(t *testing.T) { Generator: neverRegistered.GenerateEventHandler, }, } + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) manager, dynamicClient, _ := bootstrapInformerManagerWithFakeClients( @@ -239,12 +282,19 @@ func TestInformerManager(t *testing.T) { defaultFTCs, defaultObjs, generators, + handlers, ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. Verify alwaysRegistered is eventually registered for all existing FTCs. for _, ftc := range defaultFTCs { @@ -328,6 +378,7 @@ func TestInformerManager(t *testing.T) { Generator: neverRegistered.GenerateEventHandler, }, } + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( @@ -336,12 +387,19 @@ func TestInformerManager(t *testing.T) { defaultFTCs, defaultObjs, generators, + handlers, ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. 
Verify that alwaysRegistered is not registered initially for daemonset alwaysRegistered.AssertConsistently(g, time.Second*2) @@ -426,9 +484,17 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} defaultObjs := []*unstructured.Unstructured{} generators := []*EventHandlerGenerator{generator} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) - manager, _, fedClient := bootstrapInformerManagerWithFakeClients(g, ctx, defaultFTCs, defaultObjs, generators) + manager, _, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + handlers, + ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) @@ -469,6 +535,7 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} defaultObjs := []*unstructured.Unstructured{dp1} generators := []*EventHandlerGenerator{generator} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( @@ -477,12 +544,19 @@ func TestInformerManager(t *testing.T) { defaultFTCs, defaultObjs, generators, + handlers, ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. Verify that handler is not registered initially. handler.AssertConsistently(g, time.Second*2) @@ -528,6 +602,7 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} defaultObjs := []*unstructured.Unstructured{dp1} generators := []*EventHandlerGenerator{generator} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( @@ -536,12 +611,19 @@ func TestInformerManager(t *testing.T) { defaultFTCs, defaultObjs, generators, + handlers, ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. Verify that handler is registered initially. 
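The re-registration behaviour these cases exercise follows from the EventHandlerGenerator contract: on every FTC change the manager evaluates the generator's predicate against the last applied and latest FTCs, and only unregisters and regenerates the handler when it returns true. A hedged sketch of a generator that re-registers only when the FTC's generation moves, written as if it lived inside package informermanager; the Predicate field name and signature are assumed from the usage visible in this patch:

import (
	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// generationChangedGenerator re-registers its handler only when the FTC's
// generation changes, so status-only FTC updates keep the existing handler.
var generationChangedGenerator = &EventHandlerGenerator{
	Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool {
		return lastApplied == nil || lastApplied.GetGeneration() != latest.GetGeneration()
	},
	Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler {
		return cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				// A real handler would enqueue the object for reconciliation.
				klog.InfoS("observed source object", "ftc", ftc.Name)
			},
		}
	},
}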
handler.ExpectGenerateEvents(ftc.Name, 1) @@ -586,6 +668,7 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} defaultObjs := []*unstructured.Unstructured{dp1} generators := []*EventHandlerGenerator{generator} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( @@ -594,6 +677,7 @@ func TestInformerManager(t *testing.T) { defaultFTCs, defaultObjs, generators, + handlers, ) defer func() { cancel() @@ -645,6 +729,7 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} defaultObjs := []*unstructured.Unstructured{dp1} generators := []*EventHandlerGenerator{generator} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( @@ -653,12 +738,19 @@ func TestInformerManager(t *testing.T) { defaultFTCs, defaultObjs, generators, + handlers, ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. Verify that handler is registered initially handler.ExpectGenerateEvents(ftc.Name, 1) @@ -710,6 +802,7 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} defaultObjs := []*unstructured.Unstructured{dp1, cm1, sc1} generators := []*EventHandlerGenerator{generator1, generator2} + handlers := []FTCUpdateHandler{} ctx, cancel := context.WithCancel(ctx) manager, dynamicClient, fedClient := bootstrapInformerManagerWithFakeClients( @@ -718,12 +811,19 @@ func TestInformerManager(t *testing.T) { defaultFTCs, defaultObjs, generators, + handlers, ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. Verify that handler1 and handler2 is registered initially for all FTCs for _, ftc := range defaultFTCs { @@ -809,6 +909,7 @@ func TestInformerManager(t *testing.T) { defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{deploymentFTC, configmapFTC, secretFTC} defaultObjs := []*unstructured.Unstructured{dp1, cm1, sc1} generators := []*EventHandlerGenerator{generator1, generator2} + handlers := []FTCUpdateHandler{} managerCtx, managerCancel := context.WithCancel(ctx) @@ -819,12 +920,19 @@ func TestInformerManager(t *testing.T) { defaultFTCs, defaultObjs, generators, + handlers, ) defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for InformerManager cache sync") + } + // 2. 
Verify that handler1 and handler2 is registered initially for all FTCs for _, ftc := range defaultFTCs { @@ -872,6 +980,75 @@ func TestInformerManager(t *testing.T) { handler1.AssertConsistently(g, time.Second*2) handler2.AssertConsistently(g, time.Second*2) }) + + t.Run("ftc update event handlers should be called on ftc events", func(t *testing.T) { + g := gomega.NewWithT(t) + + // 1. Bootstrap environment + + generation := &atomic.Int64{} + generation.Store(1) + + // assertionCh is used to achieve 3 things: + // 1. It is used to pass assertions to the main goroutine. + // 2. It is used as an implicit lock to ensure FTC events are not squashed by the InformerManager. + // 3. It is used to ensure that the last event has been processed before the main goroutine sends an update. + assertionCh := make(chan func()) + + ftc := deploymentFTC.DeepCopy() + ftc.SetGeneration(generation.Load()) + + handler := func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) { + curGeneration := generation.Load() + if curGeneration == 1 { + assertionCh <- func() { + g.Expect(lastObserved).To(gomega.BeNil()) + g.Expect(latest.GetGeneration()).To(gomega.BeNumerically("==", 1)) + } + } else { + assertionCh <- func() { + g.Expect(lastObserved.GetGeneration()).To(gomega.BeNumerically("==", curGeneration-1)) + g.Expect(latest.GetGeneration()).To(gomega.BeNumerically("==", curGeneration)) + } + } + } + + defaultFTCs := []*fedcorev1a1.FederatedTypeConfig{ftc} + defaultObjs := []*unstructured.Unstructured{} + generators := []*EventHandlerGenerator{} + handlers := []FTCUpdateHandler{handler} + + ctx, cancel := context.WithCancel(ctx) + manager, _, fedClient := bootstrapInformerManagerWithFakeClients( + g, + ctx, + defaultFTCs, + defaultObjs, + generators, + handlers, + ) + defer func() { + cancel() + _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) + }() + + fn := <-assertionCh + fn() + + // 3. 
Generate FTC update events + + for i := 0; i < 5; i++ { + generation.Add(1) + ftc.SetGeneration(generation.Load()) + + var err error + ftc, err = fedClient.CoreV1alpha1().FederatedTypeConfigs().Update(ctx, ftc, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + fn = <-assertionCh + fn() + } + }) } func bootstrapInformerManagerWithFakeClients( @@ -880,6 +1057,7 @@ func bootstrapInformerManagerWithFakeClients( ftcs []*fedcorev1a1.FederatedTypeConfig, objects []*unstructured.Unstructured, eventHandlerGenerators []*EventHandlerGenerator, + ftcUpdateHandlers []FTCUpdateHandler, ) (InformerManager, dynamicclient.Interface, fedclient.Interface) { scheme := runtime.NewScheme() @@ -910,15 +1088,13 @@ func bootstrapInformerManagerWithFakeClients( g.Expect(err).ToNot(gomega.HaveOccurred()) } + for _, handler := range ftcUpdateHandlers { + err := informerManager.AddFTCUpdateHandler(handler) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } + factory.Start(ctx.Done()) informerManager.Start(ctx) - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), informerManager.HasSynced) { - g.Fail("Timed out waiting for InformerManager cache sync") - } - return informerManager, dynamicClient, fedClient } diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index 42ef46f6..aad8ab74 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -39,6 +39,11 @@ type EventHandlerGenerator struct { Generator func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler } +// FTCUpdateHandler is called by InformerManager each time it finishes processing an FTC. This allows controllers to +// hook into the InformerManager's view of an FTC's lifecycle. When a new FTC is observed, lastObserved will be nil. +// When a FTC deletion is observed, latest will be nil. +type FTCUpdateHandler func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) + // InformerManager provides an interface for controllers that need to dynamically register event handlers and access // objects based on FederatedTypeConfigs. InformerManager will listen to FTC events and maintain informers for the // source type of each FTC. @@ -48,6 +53,9 @@ type EventHandlerGenerator struct { type InformerManager interface { // Adds an EventHandler used to generate and register ResourceEventHandlers for each FTC's source type informer. AddEventHandlerGenerator(generator *EventHandlerGenerator) error + // Adds a FTCUpdateHandler that is called each time the InformerManager finishes processing an FTC. + AddFTCUpdateHandler(handler FTCUpdateHandler) error + // Returns a lister for the given GroupResourceVersion if it exists. The lister for each FTC's source type will // eventually exist. 
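AddFTCUpdateHandler complements AddEventHandlerGenerator: the former reacts to the FTC objects themselves, the latter to the source objects they describe. A minimal usage sketch, assuming manager is an InformerManager that has not yet been started (registration after Start returns an error, as implemented above):

err := manager.AddFTCUpdateHandler(func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) {
	switch {
	case lastObserved == nil:
		// First time the manager observes this FTC.
		klog.InfoS("new FTC observed", "ftc", latest.Name)
	case latest == nil:
		// The FTC was deleted.
		klog.InfoS("FTC deletion observed", "ftc", lastObserved.Name)
	default:
		klog.InfoS("FTC updated", "ftc", latest.Name)
	}
})
if err != nil {
	// Only possible when the manager was already started.
	klog.ErrorS(err, "failed to register FTC update handler")
}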
GetResourceLister(gvk schema.GroupVersionKind) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) From 956aa394c5b43bae77b0346798c98e208826cc65 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Sat, 22 Jul 2023 13:16:47 +0800 Subject: [PATCH 083/173] feat(informer-manager): fix call handler on deletion --- pkg/util/informermanager/informermanager.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index d54c49a9..42e3c7b2 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -281,11 +281,13 @@ func (m *informerManager) processFTCDeletionUnlocked(ctx context.Context, ftcNam delete(m.eventHandlerRegistrations, ftcName) lastObservedFTC := m.lastObservedFTCs[ftcName] - for _, handler := range m.ftcUpdateHandlers { - handler(lastObservedFTC, nil) + if lastObservedFTC != nil { + for _, handler := range m.ftcUpdateHandlers { + handler(lastObservedFTC, nil) + } } - delete(m.lastObservedFTCs, ftcName) + m.initialFTCs.Delete(ftcName) return nil From 14bc93987416495b624c2115371e63bb990e5bf5 Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Thu, 13 Jul 2023 17:03:44 +0800 Subject: [PATCH 084/173] refactor: policyRC controller --- .../app/controllermanager.go | 2 + cmd/controller-manager/app/core.go | 18 ++ pkg/controllers/policyrc/controller.go | 294 ++++++++---------- pkg/controllers/policyrc/counter.go | 1 - pkg/controllers/scheduler/util.go | 5 +- pkg/controllers/util/genericinformer.go | 1 - pkg/controllers/util/hash/hash.go | 1 - .../util/history/controller_history.go | 1 - pkg/controllers/util/resourceclient.go | 1 - pkg/controllers/util/resourceinformer.go | 3 +- pkg/util/fedobjectadapters/adapters.go | 16 + 11 files changed, 162 insertions(+), 181 deletions(-) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index 87811a6a..b6495627 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -39,10 +39,12 @@ const ( FederateControllerName = "federate" MonitorControllerName = "monitor" FollowerControllerName = "follower" + PolicyRCControllerName = "policyrc" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ FederateControllerName: startFederateController, + PolicyRCControllerName: startPolicyRCController, } var controllersDisabledByDefault = sets.New(MonitorControllerName) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index 31feff65..cca8f46b 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -25,6 +25,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllermanager" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" + "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" ) func startFederateController( @@ -51,3 +52,20 @@ func startFederateController( return federateController, nil } + +func startPolicyRCController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { + policyRCController, err := policyrc.NewPolicyRCController( + controllerCtx.RestConfig, + controllerCtx.FedInformerFactory, + controllerCtx.Metrics, + klog.Background(), + controllerCtx.WorkerCount, + ) + if err != nil { + return nil, fmt.Errorf("error creating 
policyRC controller: %w", err) + } + + go policyRCController.Run(ctx) + + return policyRCController, nil +} diff --git a/pkg/controllers/policyrc/controller.go b/pkg/controllers/policyrc/controller.go index f894d772..dfd1ceca 100644 --- a/pkg/controllers/policyrc/controller.go +++ b/pkg/controllers/policyrc/controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -20,175 +19,149 @@ package policyrc import ( "context" "fmt" - "strings" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" + pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" - "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( ControllerName = "policyrc-controller" ) -var PolicyrcControllerName = common.DefaultPrefix + "policyrc-controller" - -type informerPair struct { - store cache.Store - controller cache.Controller -} - type Controller struct { - // name of controller: -policyrc-controller - name string - - // Informer store and controller for the federated type, PropagationPolicy, - // ClusterPropagationPolicy, OverridePolicy and ClusterOverridePolicy respectively. 
- federated, pp, cpp, op, cop informerPair - - client generic.Client + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer + overridePolicyInformer fedcorev1a1informers.OverridePolicyInformer + clusterOverridePolicyInformer fedcorev1a1informers.ClusterOverridePolicyInformer + propagationPolicyInformer fedcorev1a1informers.PropagationPolicyInformer + clusterPropagationPolicyInformer fedcorev1a1informers.ClusterPropagationPolicyInformer ppCounter, opCounter *Counter // updates the local counter upon fed object updates - countWorker worker.ReconcileWorker + countWorker worker.ReconcileWorker[common.QualifiedName] // pushes values from local counter to apiserver - persistPpWorker, persistOpWorker worker.ReconcileWorker - - typeConfig *fedcorev1a1.FederatedTypeConfig + persistPpWorker, persistOpWorker worker.ReconcileWorker[common.QualifiedName] + client generic.Client metrics stats.Metrics logger klog.Logger } -func StartController(controllerConfig *util.ControllerConfig, - stopChan <-chan struct{}, typeConfig *fedcorev1a1.FederatedTypeConfig, -) error { - controller, err := newController(controllerConfig, typeConfig) - if err != nil { - return err - } - controller.logger.Info("Starting policyrc controller") - controller.Run(stopChan) - return nil -} - -func newController(controllerConfig *util.ControllerConfig, - typeConfig *fedcorev1a1.FederatedTypeConfig, +func NewPolicyRCController( + restConfig *rest.Config, + fedInformerFactory fedinformers.SharedInformerFactory, + metrics stats.Metrics, + logger klog.Logger, + workerCount int, ) (*Controller, error) { - federatedAPIResource := typeConfig.GetFederatedType() - - userAgent := fmt.Sprintf("%s-policyrc-controller", strings.ToLower(federatedAPIResource.Kind)) - configWithUserAgent := rest.CopyConfig(controllerConfig.KubeConfig) - rest.AddUserAgent(configWithUserAgent, userAgent) c := &Controller{ - name: userAgent, - typeConfig: typeConfig, - metrics: controllerConfig.Metrics, - logger: klog.LoggerWithValues(klog.Background(), "controller", ControllerName, "ftc", typeConfig.Name), + client: generic.NewForConfigOrDie(restConfig), + fedObjectInformer: fedInformerFactory.Core().V1alpha1().FederatedObjects(), + clusterFedObjectInformer: fedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + propagationPolicyInformer: fedInformerFactory.Core().V1alpha1().PropagationPolicies(), + clusterPropagationPolicyInformer: fedInformerFactory.Core().V1alpha1().ClusterPropagationPolicies(), + overridePolicyInformer: fedInformerFactory.Core().V1alpha1().OverridePolicies(), + clusterOverridePolicyInformer: fedInformerFactory.Core().V1alpha1().ClusterOverridePolicies(), + metrics: metrics, + logger: logger.WithValues("controller", ControllerName), + } + + if _, err := c.fedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + c.countWorker.Enqueue(common.NewQualifiedName(o)) + })); err != nil { + return nil, err + } + + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + c.countWorker.Enqueue(common.NewQualifiedName(o)) + })); err != nil { + return nil, err } - c.countWorker = worker.NewReconcileWorker( + c.countWorker = worker.NewReconcileWorker[common.QualifiedName]( + "policyrc-controller-count-worker", + nil, c.reconcileCount, worker.RateLimiterOptions{}, 1, // currently only one worker is meaningful due to the global mutex - 
controllerConfig.Metrics, - delayingdeliver.NewMetricTags("policyrc-controller-count-worker", c.typeConfig.GetFederatedType().Kind), + metrics, ) - c.persistPpWorker = worker.NewReconcileWorker( - func(qualifiedName common.QualifiedName) worker.Result { - return c.reconcilePersist("propagation-policy", qualifiedName, c.pp.store, c.cpp.store, c.ppCounter) + c.persistPpWorker = worker.NewReconcileWorker[common.QualifiedName]( + "policyrc-controller-persist-worker", + nil, + func(ctx context.Context, qualifiedName common.QualifiedName) worker.Result { + return c.reconcilePersist( + ctx, + "propagation-policy", + qualifiedName, + c.propagationPolicyInformer.Informer().GetStore(), + c.clusterPropagationPolicyInformer.Informer().GetStore(), + c.ppCounter, + ) }, worker.RateLimiterOptions{}, - controllerConfig.WorkerCount, - controllerConfig.Metrics, - delayingdeliver.NewMetricTags("policyrc-controller-persist-worker", c.typeConfig.GetFederatedType().Kind), + workerCount, + metrics, ) - c.persistOpWorker = worker.NewReconcileWorker( - func(qualifiedName common.QualifiedName) worker.Result { - return c.reconcilePersist("override-policy", qualifiedName, c.op.store, c.cop.store, c.opCounter) + + c.persistOpWorker = worker.NewReconcileWorker[common.QualifiedName]( + "policyrc-controller-persist-worker", + nil, + func(ctx context.Context, qualifiedName common.QualifiedName) worker.Result { + return c.reconcilePersist( + ctx, + "override-policy", + qualifiedName, + c.overridePolicyInformer.Informer().GetStore(), + c.clusterOverridePolicyInformer.Informer().GetStore(), + c.opCounter, + ) }, worker.RateLimiterOptions{}, - controllerConfig.WorkerCount, - controllerConfig.Metrics, - delayingdeliver.NewMetricTags("policyrc-controller-persist-worker", c.typeConfig.GetFederatedType().Kind), + workerCount, + metrics, ) - targetNamespace := controllerConfig.TargetNamespace + persistPpWorkerTrigger := func(o pkgruntime.Object) { + c.persistPpWorker.Enqueue(common.NewQualifiedName(o)) + } - federatedClient, err := util.NewResourceClient(configWithUserAgent, &federatedAPIResource) - if err != nil { + if _, err := c.propagationPolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(persistPpWorkerTrigger)); err != nil { return nil, err } - c.federated.store, c.federated.controller = util.NewResourceInformer( - federatedClient, - targetNamespace, - c.countWorker.EnqueueObject, - controllerConfig.Metrics, - ) - c.client = generic.NewForConfigOrDie(configWithUserAgent) - c.pp.store, c.pp.controller, err = util.NewGenericInformer( - configWithUserAgent, - targetNamespace, - &fedcorev1a1.PropagationPolicy{}, - 0, - c.persistPpWorker.EnqueueObject, - controllerConfig.Metrics, - ) - if err != nil { + if _, err := c.clusterPropagationPolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(persistPpWorkerTrigger)); err != nil { return nil, err } - c.cpp.store, c.cpp.controller, err = util.NewGenericInformer( - configWithUserAgent, - targetNamespace, - &fedcorev1a1.ClusterPropagationPolicy{}, - 0, - c.persistPpWorker.EnqueueObject, - controllerConfig.Metrics, - ) - if err != nil { - return nil, err + persistOpWorkerTrigger := func(o pkgruntime.Object) { + c.persistOpWorker.Enqueue(common.NewQualifiedName(o)) } - c.op.store, c.op.controller, err = util.NewGenericInformer( - configWithUserAgent, - targetNamespace, - &fedcorev1a1.OverridePolicy{}, - 0, - c.persistOpWorker.EnqueueObject, - controllerConfig.Metrics, - ) - if err != nil { + if _, err := 
c.overridePolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(persistOpWorkerTrigger)); err != nil { return nil, err } - c.cop.store, c.cop.controller, err = util.NewGenericInformer( - configWithUserAgent, - targetNamespace, - &fedcorev1a1.ClusterOverridePolicy{}, - 0, - c.persistOpWorker.EnqueueObject, - controllerConfig.Metrics, - ) - if err != nil { + if _, err := c.clusterOverridePolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(persistOpWorkerTrigger)); err != nil { return nil, err } @@ -207,30 +180,39 @@ func newController(controllerConfig *util.ControllerConfig, return c, nil } -func (c *Controller) Run(stopChan <-chan struct{}) { - c.logger.Info("Starting controller") - defer c.logger.Info("Stopping controller") - - for _, pair := range []informerPair{c.federated, c.pp, c.cpp, c.op, c.cop} { - go pair.controller.Run(stopChan) - } +func (c *Controller) Run(ctx context.Context) { + ctx, logger := logging.InjectLogger(ctx, c.logger) - c.countWorker.Run(stopChan) + logger.Info("Starting controller") + defer c.logger.Info("Stopping controller") // wait for all counts to finish sync before persisting the values - if !cache.WaitForNamedCacheSync(c.name, stopChan, c.HasSynced) { - utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync for controller: %s", c.name)) + if !cache.WaitForNamedCacheSync(ControllerName, ctx.Done(), c.HasSynced) { + logger.Error(nil, "Timed out waiting for caches to sync") + return } - c.persistPpWorker.Run(stopChan) - c.persistOpWorker.Run(stopChan) + logger.Info("Caches are synced") + c.countWorker.Run(ctx) + c.persistPpWorker.Run(ctx) + c.persistOpWorker.Run(ctx) + <-ctx.Done() } func (c *Controller) HasSynced() bool { - return c.federated.controller.HasSynced() + return c.propagationPolicyInformer.Informer().HasSynced() && + c.clusterPropagationPolicyInformer.Informer().HasSynced() && + c.overridePolicyInformer.Informer().HasSynced() && + c.clusterOverridePolicyInformer.Informer().HasSynced() && + c.fedObjectInformer.Informer().HasSynced() && + c.clusterFedObjectInformer.Informer().HasSynced() +} + +func (c *Controller) IsControllerReady() bool { + return c.HasSynced() } -func (c *Controller) reconcileCount(qualifiedName common.QualifiedName) (status worker.Result) { - logger := c.logger.WithValues("object", qualifiedName.String()) +func (c *Controller) reconcileCount(ctx context.Context, qualifiedName common.QualifiedName) (status worker.Result) { + ctx, logger := logging.InjectLoggerValues(ctx, "object", qualifiedName.String()) c.metrics.Rate("policyrc-count-controller.throughput", 1) logger.V(3).Info("Policyrc count controller starting to reconcile") @@ -241,19 +223,15 @@ func (c *Controller) reconcileCount(qualifiedName common.QualifiedName) (status Info("Policyrc count controller finished reconciling") }() - fedObjAny, fedObjExists, err := c.federated.store.GetByKey(qualifiedName.String()) - if err != nil { - utilruntime.HandleError(err) + fedObj, err := fedobjectadapters.GetFromLister(c.fedObjectInformer.Lister(), c.clusterFedObjectInformer.Lister(), qualifiedName.Namespace, qualifiedName.Name) + if err != nil && !apierrors.IsNotFound(err) { + logger.Error(err, "Failed to get federated object") return worker.StatusError } - var fedObj *unstructured.Unstructured - if fedObjExists { - fedObj = fedObjAny.(*unstructured.Unstructured) - } var newPps []PolicyKey - if fedObjExists { - newPolicy, newHasPolicy := scheduler.MatchedPolicyKey(fedObj, c.typeConfig.GetNamespaced()) + if fedObj != nil { + 
newPolicy, newHasPolicy := fedobjectadapters.MatchedPolicyKey(fedObj, fedObj.GetNamespace() != "") if newHasPolicy { newPps = []PolicyKey{PolicyKey(newPolicy)} } @@ -263,7 +241,7 @@ func (c *Controller) reconcileCount(qualifiedName common.QualifiedName) (status c.ppCounter.Update(ObjectKey(qualifiedName), newPps) var newOps []PolicyKey - if fedObjExists { + if fedObj != nil { if op, exists := fedObj.GetLabels()[override.OverridePolicyNameLabel]; exists { newOps = append(newOps, PolicyKey{Namespace: fedObj.GetNamespace(), Name: op}) } @@ -279,12 +257,13 @@ func (c *Controller) reconcileCount(qualifiedName common.QualifiedName) (status } func (c *Controller) reconcilePersist( + ctx context.Context, metricName string, qualifiedName common.QualifiedName, nsScopeStore, clusterScopeStore cache.Store, counter *Counter, ) worker.Result { - logger := c.logger.WithValues("object", qualifiedName.String()) + ctx, logger := logging.InjectLoggerValues(ctx, "object", qualifiedName.String()) c.metrics.Rate(fmt.Sprintf("policyrc-persist-%s-controller.throughput", metricName), 1) logger.V(3).Info("Policyrc persist controller starting to reconcile") @@ -301,7 +280,7 @@ func (c *Controller) reconcilePersist( policyAny, exists, err := store.GetByKey(qualifiedName.String()) if err != nil { - utilruntime.HandleError(err) + logger.Error(err, "Failed to get policy") return worker.StatusError } @@ -315,50 +294,21 @@ func (c *Controller) reconcilePersist( status := policy.GetRefCountedStatus() - group := c.typeConfig.GetTargetType().Group - resource := c.typeConfig.GetTargetType().Name - - var matchedTypedRefCount *fedcorev1a1.TypedRefCount - for i := range status.TypedRefCount { - typed := &status.TypedRefCount[i] - if typed.Group == group && typed.Resource == resource { - matchedTypedRefCount = typed - break - } - } - - if matchedTypedRefCount == nil { - status.TypedRefCount = append(status.TypedRefCount, fedcorev1a1.TypedRefCount{ - Group: group, - Resource: resource, - }) - matchedTypedRefCount = &status.TypedRefCount[len(status.TypedRefCount)-1] - } - - newTypedRefCount := counter.GetPolicyCounts([]PolicyKey{PolicyKey(qualifiedName)})[0] + newRefCount := counter.GetPolicyCounts([]PolicyKey{PolicyKey(qualifiedName)})[0] hasChange := false - if newTypedRefCount != matchedTypedRefCount.Count { - matchedTypedRefCount.Count = newTypedRefCount - hasChange = true - } - - sum := int64(0) - for _, typed := range status.TypedRefCount { - sum += typed.Count - } - if sum != status.RefCount { - status.RefCount = sum + if newRefCount != status.RefCount { + status.RefCount = newRefCount hasChange = true } if hasChange { - err := c.client.UpdateStatus(context.TODO(), policy) + err := c.client.UpdateStatus(ctx, policy) if err != nil { if apierrors.IsConflict(err) { return worker.StatusConflict } - utilruntime.HandleError(err) + logger.Error(err, "Failed to update policy status") return worker.StatusError } } diff --git a/pkg/controllers/policyrc/counter.go b/pkg/controllers/policyrc/counter.go index f17f8427..641d5891 100644 --- a/pkg/controllers/policyrc/counter.go +++ b/pkg/controllers/policyrc/counter.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
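A note on the simplification in the persist step above: with the unified federated types there is no longer a per-target-type breakdown (the old TypedRefCount bookkeeping), so persisting a policy's reference count reduces to comparing a single counter against the stored status and issuing an UpdateStatus call only when it changed. A minimal sketch of that compare-and-write step follows; RefCountedStatus and persistRefCount are illustrative stand-ins, not the real fedcorev1a1 types:

```go
// Sketch only: RefCountedStatus stands in for the real policy status type.
package main

import "fmt"

type RefCountedStatus struct {
	RefCount int64
}

// persistRefCount reports whether the stored status changed and therefore
// needs an UpdateStatus call; unchanged counts skip the API round trip.
func persistRefCount(status *RefCountedStatus, newRefCount int64) (hasChange bool) {
	if newRefCount == status.RefCount {
		return false
	}
	status.RefCount = newRefCount
	return true
}

func main() {
	status := &RefCountedStatus{RefCount: 3}
	fmt.Println(persistRefCount(status, 5), status.RefCount) // true 5
	fmt.Println(persistRefCount(status, 5), status.RefCount) // false 5
}
```

Skipping the write when nothing changed keeps the persist workers from issuing no-op status updates against the API server on every recount.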
diff --git a/pkg/controllers/scheduler/util.go b/pkg/controllers/scheduler/util.go index 6172c02b..2a2b69d9 100644 --- a/pkg/controllers/scheduler/util.go +++ b/pkg/controllers/scheduler/util.go @@ -24,8 +24,9 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" @@ -35,7 +36,7 @@ const ( operationReplace = "replace" ) -func MatchedPolicyKey(obj *unstructured.Unstructured, isNamespaced bool) (result common.QualifiedName, ok bool) { +func MatchedPolicyKey(obj fedcorev1a1.GenericFederatedObject, isNamespaced bool) (result common.QualifiedName, ok bool) { labels := obj.GetLabels() if policyName, exists := labels[PropagationPolicyNameLabel]; exists && isNamespaced { diff --git a/pkg/controllers/util/genericinformer.go b/pkg/controllers/util/genericinformer.go index 74621164..d46d8cbe 100644 --- a/pkg/controllers/util/genericinformer.go +++ b/pkg/controllers/util/genericinformer.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/util/hash/hash.go b/pkg/controllers/util/hash/hash.go index 94a81902..2d239839 100644 --- a/pkg/controllers/util/hash/hash.go +++ b/pkg/controllers/util/hash/hash.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2015 The Kubernetes Authors. diff --git a/pkg/controllers/util/history/controller_history.go b/pkg/controllers/util/history/controller_history.go index d217c6d5..463d9a46 100644 --- a/pkg/controllers/util/history/controller_history.go +++ b/pkg/controllers/util/history/controller_history.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/resourceclient.go b/pkg/controllers/util/resourceclient.go index 0e595de3..4df9ad2b 100644 --- a/pkg/controllers/util/resourceclient.go +++ b/pkg/controllers/util/resourceclient.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/util/resourceinformer.go b/pkg/controllers/util/resourceinformer.go index 8159c58a..bffaead1 100644 --- a/pkg/controllers/util/resourceinformer.go +++ b/pkg/controllers/util/resourceinformer.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. 
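A recurring pattern in this series, visible in the hunks above and below: many files change only by deleting a `//go:build exclude` line. That line is a standard Go build constraint, so the file compiles only when the `exclude` tag is explicitly set, which ordinary builds and tests never do; deleting it returns the file to the build. A self-contained illustration (package name arbitrary):

```go
//go:build exclude

// With the constraint above, this file is compiled only when the build
// explicitly sets the tag (go build -tags exclude); normal builds and tests
// skip it entirely. Deleting the first line, as these hunks do, re-enables it.
package util
```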
@@ -38,8 +37,8 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/managedlabel" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" ) var mutex = sync.Mutex{} diff --git a/pkg/util/fedobjectadapters/adapters.go b/pkg/util/fedobjectadapters/adapters.go index 7b4b02ad..c7761adf 100644 --- a/pkg/util/fedobjectadapters/adapters.go +++ b/pkg/util/fedobjectadapters/adapters.go @@ -9,6 +9,8 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" ) func ensureNilInterface( @@ -121,3 +123,17 @@ func Delete( return fedv1a1Client.FederatedObjects(namespace).Delete(ctx, name, opts) } } + +func MatchedPolicyKey(obj fedcorev1a1.GenericFederatedObject, isNamespaced bool) (result common.QualifiedName, ok bool) { + labels := obj.GetLabels() + + if policyName, exists := labels[scheduler.PropagationPolicyNameLabel]; exists && isNamespaced { + return common.QualifiedName{Namespace: obj.GetNamespace(), Name: policyName}, true + } + + if policyName, exists := labels[scheduler.ClusterPropagationPolicyNameLabel]; exists { + return common.QualifiedName{Namespace: "", Name: policyName}, true + } + + return common.QualifiedName{}, false +} From c796fd6f39709048ed4c12c336adba1b2418f4fe Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Fri, 14 Jul 2023 16:05:39 +0800 Subject: [PATCH 085/173] refactor: override controller --- .../app/controllermanager.go | 2 + cmd/controller-manager/app/core.go | 20 + .../override/overridepolicy_controller.go | 478 ++++++++++-------- pkg/controllers/override/util.go | 42 +- pkg/controllers/util/clusterselector/util.go | 1 - pkg/controllers/util/meta.go | 1 - pkg/controllers/util/overrides.go | 70 +-- 7 files changed, 318 insertions(+), 296 deletions(-) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index b6495627..a9558a01 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -40,11 +40,13 @@ const ( MonitorControllerName = "monitor" FollowerControllerName = "follower" PolicyRCControllerName = "policyrc" + OverrideControllerName = "overridepolicy" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ FederateControllerName: startFederateController, PolicyRCControllerName: startPolicyRCController, + OverrideControllerName: startOverridePolicyController, } var controllersDisabledByDefault = sets.New(MonitorControllerName) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index cca8f46b..4a20d178 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -25,6 +25,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllermanager" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" + "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" ) @@ -69,3 +70,22 @@ func startPolicyRCController(ctx context.Context, controllerCtx *controllerconte return policyRCController, nil } + 
+func startOverridePolicyController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { + overrideController, err := override.NewOverridePolicyController( + controllerCtx.KubeClientset, + controllerCtx.FedClientset, + controllerCtx.FedInformerFactory, + controllerCtx.InformerManager, + controllerCtx.Metrics, + klog.Background(), + controllerCtx.WorkerCount, + ) + if err != nil { + return nil, fmt.Errorf("error creating override controller: %w", err) + } + + go overrideController.Run(ctx) + + return overrideController, nil +} diff --git a/pkg/controllers/override/overridepolicy_controller.go b/pkg/controllers/override/overridepolicy_controller.go index f1ad8a5d..3d870150 100644 --- a/pkg/controllers/override/overridepolicy_controller.go +++ b/pkg/controllers/override/overridepolicy_controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -20,29 +19,33 @@ package override import ( "context" "fmt" - "strings" "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - kubeclient "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" + "k8s.io/apimachinery/pkg/labels" + pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( @@ -54,224 +57,255 @@ const ( var PrefixedControllerName = common.DefaultPrefix + ControllerName -// OverrideController adds override rules specified in OverridePolicies +// Controller adds override rules specified in OverridePolicies // to federated objects. 
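Before the controller internals, it helps to see what a single override ultimately does to one cluster's copy of a resource: each patch is a JSON-patch-style op/path/value applied to the federated template before dispatch. The sketch below hand-rolls a `replace` on a nested map purely as an illustration; it is not the controller's actual patch machinery, which operates on JSON Pointer paths such as `/spec/replicas`:

```go
package main

import "fmt"

// applyReplace descends pathSegments and replaces the final key's value,
// mimicking a JSON patch {"op": "replace", "path": ..., "value": ...}.
func applyReplace(doc map[string]any, pathSegments []string, value any) bool {
	for _, key := range pathSegments[:len(pathSegments)-1] {
		next, ok := doc[key].(map[string]any)
		if !ok {
			return false // path does not exist; a real patcher would error
		}
		doc = next
	}
	doc[pathSegments[len(pathSegments)-1]] = value
	return true
}

func main() {
	deployment := map[string]any{"spec": map[string]any{"replicas": 2}}
	applyReplace(deployment, []string{"spec", "replicas"}, 5)
	fmt.Println(deployment) // map[spec:map[replicas:5]]
}
```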
type Controller struct { - // name of controller - name string - - // FederatedTypeConfig for this controller - typeConfig *fedcorev1a1.FederatedTypeConfig - - // Store for federated objects - federatedStore cache.Store - // Controller for federated objects - federatedController cache.Controller - // Client for federated objects - federatedClient util.ResourceClient - - // Store for OverridePolicy - overridePolicyStore cache.Store - // Controller for OverridePolicy - overridePolicyController cache.Controller - - // Store for ClusterOverridePolicy - clusterOverridePolicyStore cache.Store - // Controller for ClusterOverridePolicy - clusterOverridePolicyController cache.Controller - - // Store for FederatedCluster - clusterStore cache.Store - // Controller for FederatedCluster - clusterController cache.Controller - - worker worker.ReconcileWorker + worker worker.ReconcileWorker[common.QualifiedName] + + informerManager informermanager.InformerManager + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer + overridePolicyInformer fedcorev1a1informers.OverridePolicyInformer + clusterOverridePolicyInformer fedcorev1a1informers.ClusterOverridePolicyInformer + federatedClusterInformer fedcorev1a1informers.FederatedClusterInformer + + fedClient fedclient.Interface eventRecorder record.EventRecorder metrics stats.Metrics + logger klog.Logger } -func StartController( - controllerConfig *util.ControllerConfig, - stopChan <-chan struct{}, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) error { - controller, err := newController(controllerConfig, typeConfig) - if err != nil { - return err - } - klog.V(4).Infof("Starting %s", controller.name) - controller.Run(stopChan) - return nil -} - -func newController( - controllerConfig *util.ControllerConfig, - typeConfig *fedcorev1a1.FederatedTypeConfig, +func NewOverridePolicyController( + kubeClient kubernetes.Interface, + fedClient fedclient.Interface, + fedInformerFactory fedinformers.SharedInformerFactory, + informerManager informermanager.InformerManager, + metrics stats.Metrics, + logger klog.Logger, + workerCount int, ) (*Controller, error) { - userAgent := fmt.Sprintf("%s-%s", strings.ToLower(typeConfig.GetFederatedType().Kind), ControllerName) - configWithUserAgent := rest.CopyConfig(controllerConfig.KubeConfig) - rest.AddUserAgent(configWithUserAgent, userAgent) - - kubeClient := kubeclient.NewForConfigOrDie(configWithUserAgent) - recorder := eventsink.NewDefederatingRecorderMux(kubeClient, userAgent, 4) - c := &Controller{ - name: userAgent, - typeConfig: typeConfig, - eventRecorder: recorder, - metrics: controllerConfig.Metrics, + informerManager: informerManager, + fedObjectInformer: fedInformerFactory.Core().V1alpha1().FederatedObjects(), + clusterFedObjectInformer: fedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + overridePolicyInformer: fedInformerFactory.Core().V1alpha1().OverridePolicies(), + clusterOverridePolicyInformer: fedInformerFactory.Core().V1alpha1().ClusterOverridePolicies(), + federatedClusterInformer: fedInformerFactory.Core().V1alpha1().FederatedClusters(), + fedClient: fedClient, + metrics: metrics, + logger: logger.WithValues("controller", ControllerName), } - var err error - - federatedApiResource := typeConfig.GetFederatedType() - c.federatedClient, err = util.NewResourceClient(configWithUserAgent, &federatedApiResource) - if err != nil { - return nil, fmt.Errorf("NewResourceClient failed: %w", err) - } - - c.worker = 
worker.NewReconcileWorker( + c.eventRecorder = eventsink.NewDefederatingRecorderMux(kubeClient, ControllerName, 4) + c.worker = worker.NewReconcileWorker[common.QualifiedName]( + ControllerName, + nil, c.reconcile, worker.RateLimiterOptions{}, - controllerConfig.WorkerCount, - controllerConfig.Metrics, - delayingdeliver.NewMetricTags(c.name, federatedApiResource.Kind), - ) - enqueueObj := c.worker.EnqueueObject - c.federatedStore, c.federatedController = util.NewResourceInformer( - c.federatedClient, - controllerConfig.TargetNamespace, - enqueueObj, - controllerConfig.Metrics, + workerCount, + metrics, ) - getPolicyHandlers := func(labelKey string) *cache.ResourceEventHandlerFuncs { - return &cache.ResourceEventHandlerFuncs{ - // Policy added/updated: we need to reconcile all fedObjects referencing this policy - AddFunc: func(obj interface{}) { - policy := obj.(fedcorev1a1.GenericOverridePolicy) - c.enqueueFedObjectsUsingPolicy(policy, labelKey) - }, - UpdateFunc: func(oldObj, newObj interface{}) { - oldPolicy := oldObj.(fedcorev1a1.GenericOverridePolicy) - newPolicy := newObj.(fedcorev1a1.GenericOverridePolicy) - if !equality.Semantic.DeepEqual(oldPolicy.GetSpec(), newPolicy.GetSpec()) { - c.enqueueFedObjectsUsingPolicy(newPolicy, labelKey) - } - }, - DeleteFunc: nil, - } + if _, err := c.fedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + fedObj := o.(*fedcorev1a1.FederatedObject) + c.worker.Enqueue(common.QualifiedName{Namespace: fedObj.Namespace, Name: fedObj.Name}) + })); err != nil { + return nil, err } - c.overridePolicyStore, c.overridePolicyController, err = util.NewGenericInformerWithEventHandler( - controllerConfig.KubeConfig, - "", - &fedcorev1a1.OverridePolicy{}, - util.NoResyncPeriod, - getPolicyHandlers(OverridePolicyNameLabel), - controllerConfig.Metrics, - ) - if err != nil { + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + fedObj := o.(*fedcorev1a1.ClusterFederatedObject) + c.worker.Enqueue(common.QualifiedName{Name: fedObj.Name}) + })); err != nil { return nil, err } - c.clusterOverridePolicyStore, c.clusterOverridePolicyController, err = util.NewGenericInformerWithEventHandler( - controllerConfig.KubeConfig, - "", - &fedcorev1a1.ClusterOverridePolicy{}, - util.NoResyncPeriod, - getPolicyHandlers(ClusterOverridePolicyNameLabel), - controllerConfig.Metrics, - ) - if err != nil { + if _, err := c.overridePolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + policy := o.(fedcorev1a1.GenericOverridePolicy) + c.enqueueFedObjectsUsingPolicy(policy, OverridePolicyNameLabel) + })); err != nil { return nil, err } - c.clusterStore, c.clusterController, err = util.NewGenericInformerWithEventHandler( - controllerConfig.KubeConfig, - "", - &fedcorev1a1.FederatedCluster{}, - util.NoResyncPeriod, - &cache.ResourceEventHandlerFuncs{ - /* - No need to reconcile on Add and Delete. Since we only resolve overrides for - scheduled clusters, there's no point in reconciling before scheduler does rescheduling. - */ - AddFunc: nil, - DeleteFunc: nil, - // We only care about label change, since that is the only cluster change - // that can affect overrider computation. - // Currently MatchFields only matches /metadata/name. - // If we extend MatchFields to match new fields, we may need to revise UpdateFunc - // to expand the trigger conditions. 
- UpdateFunc: func(oldObj, newObj interface{}) { - oldCluster := oldObj.(*fedcorev1a1.FederatedCluster) - newCluster := newObj.(*fedcorev1a1.FederatedCluster) - if !equality.Semantic.DeepEqual(oldCluster.Labels, newCluster.Labels) { - c.reconcileOnClusterChange(newCluster) - } - }, + if _, err := c.clusterOverridePolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + policy := o.(fedcorev1a1.GenericOverridePolicy) + c.enqueueFedObjectsUsingPolicy(policy, ClusterOverridePolicyNameLabel) + })); err != nil { + return nil, err + } + + if _, err := c.federatedClusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + /* + No need to reconcile on Add and Delete. Since we only resolve overrides for + scheduled clusters, there's no point in reconciling before scheduler does rescheduling. + */ + AddFunc: nil, + DeleteFunc: nil, + // We only care about label change, since that is the only cluster change + // that can affect overrider computation. + // Currently MatchFields only matches /metadata/name. + // If we extend MatchFields to match new fields, we may need to revise UpdateFunc + // to expand the trigger conditions. + UpdateFunc: func(oldObj, newObj interface{}) { + oldCluster := oldObj.(*fedcorev1a1.FederatedCluster) + newCluster := newObj.(*fedcorev1a1.FederatedCluster) + if !equality.Semantic.DeepEqual(oldCluster.Labels, newCluster.Labels) { + c.reconcileOnClusterChange(newCluster) + } }, - controllerConfig.Metrics, - ) - if err != nil { + }); err != nil { return nil, err } + informerManager.AddFTCUpdateHandler(func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) { + if lastObserved == nil && latest != nil { + c.enqueueFederatedObjectsForFTC(latest) + return + } + }) + return c, nil } +func (c *Controller) enqueueFederatedObjectsForFTC(ftc *fedcorev1a1.FederatedTypeConfig) { + logger := c.logger.WithValues("ftc", ftc.GetName()) + + logger.V(2).Info("Enqueue federated objects for FTC") + + allObjects := []fedcorev1a1.GenericFederatedObject{} + fedObjects, err := c.fedObjectInformer.Lister().List(labels.Everything()) + if err != nil { + c.logger.Error(err, "Failed to enqueue FederatedObjects for override policy") + return + } + for _, obj := range fedObjects { + allObjects = append(allObjects, obj) + } + clusterFedObjects, err := c.clusterFedObjectInformer.Lister().List(labels.Everything()) + if err != nil { + c.logger.Error(err, "Failed to enqueue ClusterFederatedObjects for override policy") + return + } + for _, obj := range clusterFedObjects { + allObjects = append(allObjects, obj) + } + + for _, obj := range allObjects { + sourceGVK, err := obj.GetSpec().GetTemplateGVK() + if err != nil { + c.logger.Error(err, "Failed to get source GVK from FederatedObject, will not enqueue") + continue + } + if sourceGVK == ftc.GetSourceTypeGVK() { + c.worker.Enqueue(common.NewQualifiedName(obj)) + } + } +} + func (c *Controller) enqueueFedObjectsUsingPolicy(policy fedcorev1a1.GenericOverridePolicy, labelKey string) { - klog.V(2).Infof("%s observed a policy change for %s %q", c.name, util.GetResourceKind(policy), policy.GetKey()) - for _, fedObjectInterface := range c.federatedStore.List() { - fedObject := fedObjectInterface.(*unstructured.Unstructured) - labelValue, exists := fedObject.GetLabels()[labelKey] - if exists && - // fedObject must reference the policy - labelValue == policy.GetName() && - // for ClusterOverridePolicy, fedObject can be cluster-scoped or belong to any namespace - // for OverridePolicy, policy and fedObject must 
belong to the same namespace; - (policy.GetNamespace() == "" || policy.GetNamespace() == fedObject.GetNamespace()) { - c.worker.EnqueueObject(fedObject) + logger := c.logger.WithValues("override-policy", policy.GetKey()) + logger.V(2).Info("observed a policy change") + + selector := labels.SelectorFromSet(labels.Set{labelKey: policy.GetName()}) + clusterFedObjects, err := c.clusterFedObjectInformer.Lister().List(selector) + if err != nil { + logger.Error(err, "Failed to list reference cluster federated objects") + return + } + + for _, clusterFedObject := range clusterFedObjects { + c.worker.Enqueue(common.QualifiedName{Name: clusterFedObject.GetName()}) + } + + if policy.GetNamespace() != "" { + fedObjects, err := c.fedObjectInformer.Lister().FederatedObjects(policy.GetNamespace()).List(selector) + if err != nil { + logger.Error(err, "Failed to list reference federated objects") + return + } + + for _, fedObject := range fedObjects { + c.worker.Enqueue(common.QualifiedName{ + Namespace: fedObject.GetNamespace(), + Name: fedObject.GetName(), + }) } } } func (c *Controller) reconcileOnClusterChange(cluster *fedcorev1a1.FederatedCluster) { - klog.V(2).Infof("%s observed a cluster change for %q", c.name, cluster.GetName()) - for _, fedObjectInterface := range c.federatedStore.List() { - fedObject := fedObjectInterface.(*unstructured.Unstructured) - labels := fedObject.GetLabels() - // only enqueue fedObjects with a policy since we only need to recompute policies that are already applied - if len(labels[OverridePolicyNameLabel]) > 0 || len(labels[ClusterOverridePolicyNameLabel]) > 0 { - c.worker.EnqueueObject(fedObject) + logger := c.logger.WithValues("federated-cluster", cluster.GetName()) + logger.V(2).Info("observed a cluster change") + + opRequirement, _ := labels.NewRequirement(OverridePolicyNameLabel, selection.Exists, nil) + copRequirement, _ := labels.NewRequirement(ClusterOverridePolicyNameLabel, selection.Exists, nil) + + for _, requirement := range []labels.Requirement{*opRequirement, *copRequirement} { + fedObjects, err := c.fedObjectInformer.Lister().List(labels.NewSelector().Add(requirement)) + if err != nil { + logger.Error(err, "Failed to list federated objects") + return + } + for _, fedObject := range fedObjects { + c.worker.Enqueue(common.QualifiedName{ + Namespace: fedObject.Namespace, + Name: fedObject.Name, + }) + } + + // no need to list cluster federated object for override policy + if requirement.Key() == ClusterOverridePolicyNameLabel { + clusterFedObjects, err := c.clusterFedObjectInformer.Lister().List(labels.NewSelector().Add(requirement)) + if err != nil { + logger.Error(err, "Failed to list cluster federated objects") + return + } + for _, clusterFedObject := range clusterFedObjects { + c.worker.Enqueue(common.QualifiedName{ + Name: clusterFedObject.Name, + }) + } } } } -func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result { - kind := c.typeConfig.GetFederatedType().Kind - key := qualifiedName.String() +func (c *Controller) reconcile(ctx context.Context, qualifiedName common.QualifiedName) (status worker.Result) { + ctx, keyedLogger := logging.InjectLoggerValues(ctx, "federated-name", qualifiedName.String()) - c.metrics.Rate(fmt.Sprintf("%v.throughput", c.name), 1) - klog.V(4).Infof("%s starting to reconcile %s %v", c.name, kind, key) + c.metrics.Rate(fmt.Sprintf("%v.throughput", ControllerName), 1) + keyedLogger.V(3).Info("Starting to reconcile") startTime := time.Now() defer func() { - c.metrics.Duration(fmt.Sprintf("%s.latency", 
c.name), startTime) - klog.V(4).Infof("%s finished reconciling %s %v (duration: %v)", c.name, kind, key, time.Since(startTime)) + c.metrics.Duration(fmt.Sprintf("%s.latency", ControllerName), startTime) + keyedLogger.WithValues("duration", time.Since(startTime), "status", status).V(3).Info("Finished reconciling") }() fedObject, err := c.getFederatedObject(qualifiedName) if err != nil { - utilruntime.HandleError(err) + keyedLogger.Error(err, "Failed to get federated object") return worker.StatusError } + if fedObject == nil || fedObject.GetDeletionTimestamp() != nil { return worker.StatusAllOK } + templateGVK, err := fedObject.GetSpec().GetTemplateGVK() + if err != nil { + keyedLogger.Error(err, "Failed to get template gvk") + return worker.StatusError + } + + ctx, keyedLogger = logging.InjectLoggerValues(ctx, "source-gvk", templateGVK.String()) + typeConfig, exist := c.informerManager.GetResourceFTC(templateGVK) + if !exist { + keyedLogger.V(3).Info("Resource ftc not found") + return worker.StatusAllOK + } + + ctx, keyedLogger = logging.InjectLoggerValues(ctx, "ftc", typeConfig.Name) if ok, err := pendingcontrollers.ControllerDependenciesFulfilled(fedObject, PrefixedControllerName); err != nil { - utilruntime.HandleError(fmt.Errorf("failed to check controller dependencies for %s %q: %w", kind, key, err)) + keyedLogger.Error(err, "Failed to check controller dependencies") return worker.StatusError } else if !ok { return worker.StatusAllOK @@ -280,11 +314,12 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result // TODO: don't apply a policy until it has the required finalizer for deletion protection policies, recheckOnErr, err := lookForMatchedPolicies( fedObject, - c.typeConfig.GetNamespaced(), - c.overridePolicyStore, - c.clusterOverridePolicyStore, + fedObject.GetNamespace() != "", + c.overridePolicyInformer.Lister(), + c.clusterOverridePolicyInformer.Lister(), ) if err != nil { + keyedLogger.Error(err, "Failed to look for matched policy") c.eventRecorder.Eventf( fedObject, corev1.EventTypeWarning, @@ -300,7 +335,7 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result placedClusters, err := c.getPlacedClusters(fedObject) if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to get placed clusters for %s %q: %w", kind, key, err)) + keyedLogger.Error(err, "Failed to get placed clusters") return worker.StatusError } @@ -325,7 +360,7 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result currentOverrides, err := util.GetOverrides(fedObject, PrefixedControllerName) if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to get overrides from %s %q: %w", kind, key, err)) + keyedLogger.Error(err, "Failed to get overrides") return worker.StatusError } @@ -334,7 +369,7 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result if needsUpdate { err = util.SetOverrides(fedObject, PrefixedControllerName, overrides) if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to set overrides for %s %q: %w", kind, key, err)) + keyedLogger.Error(err, "Failed to set overrides") return worker.StatusError } } @@ -343,22 +378,21 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result fedObject, PrefixedControllerName, needsUpdate, - c.typeConfig.GetControllers(), + typeConfig.GetControllers(), ) if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to update pending controllers for %s %q: %w", kind, key, err)) + keyedLogger.Error(err, "Failed to 
update pending controllers") return worker.StatusError } needsUpdate = needsUpdate || pendingControllersUpdated if needsUpdate { - _, err = c.federatedClient.Resources(fedObject.GetNamespace()). - Update(context.TODO(), fedObject, metav1.UpdateOptions{}) + _, err = fedobjectadapters.Update(context.Background(), c.fedClient.CoreV1alpha1(), fedObject, metav1.UpdateOptions{}) if err != nil { if apierrors.IsConflict(err) { return worker.StatusConflict } - utilruntime.HandleError(fmt.Errorf("failed to update %s %q for applying overrides: %w", kind, key, err)) + keyedLogger.Error(err, "Failed to update federated object for applying overrides") return worker.StatusAllOK } @@ -373,53 +407,61 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result return worker.StatusAllOK } -func (c *Controller) getPlacedClusters(fedObject *unstructured.Unstructured) ([]*fedcorev1a1.FederatedCluster, error) { - placementObj, err := util.UnmarshalGenericPlacements(fedObject) +func (c *Controller) getPlacedClusters(fedObject fedcorev1a1.GenericFederatedObject) ([]*fedcorev1a1.FederatedCluster, error) { + placedClusterNames := fedObject.GetSpec().GetPlacementUnion() + clusterObjs, err := c.federatedClusterInformer.Lister().List(labels.Everything()) if err != nil { - return nil, fmt.Errorf("failed to unmarshal placements: %w", err) + return nil, fmt.Errorf("failed to list federated cluster: %w", err) } - placedClusterNames := placementObj.ClusterNameUnion() - - clusterObjs := c.clusterStore.List() placedClusters := make([]*fedcorev1a1.FederatedCluster, 0, len(clusterObjs)) for _, clusterObj := range clusterObjs { - cluster, ok := clusterObj.(*fedcorev1a1.FederatedCluster) - if !ok { - return nil, fmt.Errorf("got wrong type %T from cluster store", cluster) - } - if _, exists := placedClusterNames[cluster.Name]; exists { - placedClusters = append(placedClusters, cluster) + if _, exists := placedClusterNames[clusterObj.Name]; exists { + placedClusters = append(placedClusters, clusterObj) } } return placedClusters, nil } -func (c *Controller) Run(stopChan <-chan struct{}) { - go c.federatedController.Run(stopChan) - go c.overridePolicyController.Run(stopChan) - go c.clusterOverridePolicyController.Run(stopChan) - go c.clusterController.Run(stopChan) - - if !cache.WaitForNamedCacheSync(c.name, stopChan, - c.federatedController.HasSynced, - c.overridePolicyController.HasSynced, - c.clusterOverridePolicyController.HasSynced, - c.clusterController.HasSynced, - ) { - utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync for controller: %s", c.name)) - } - c.worker.Run(stopChan) +func (c *Controller) Run(ctx context.Context) { + ctx, logger := logging.InjectLogger(ctx, c.logger) + + logger.Info("Starting controller") + defer logger.Info("Stopping controller") + + if !cache.WaitForNamedCacheSync(ControllerName, ctx.Done(), c.HasSynced) { + logger.Error(nil, "Timed out waiting for caches to sync") + return + } + logger.Info("Caches are synced") + c.worker.Run(ctx) + <-ctx.Done() +} + +func (c *Controller) HasSynced() bool { + return c.federatedClusterInformer.Informer().HasSynced() && + c.overridePolicyInformer.Informer().HasSynced() && + c.clusterOverridePolicyInformer.Informer().HasSynced() && + c.fedObjectInformer.Informer().HasSynced() && + c.clusterFedObjectInformer.Informer().HasSynced() && + c.informerManager.HasSynced() } -func (c *Controller) getFederatedObject(qualifiedName common.QualifiedName) (*unstructured.Unstructured, error) { - cachedObj, exist, err := 
c.federatedStore.GetByKey(qualifiedName.String()) - if err != nil { +func (c *Controller) IsControllerReady() bool { + return c.HasSynced() +} + +func (c *Controller) getFederatedObject(qualifiedName common.QualifiedName) (fedcorev1a1.GenericFederatedObject, error) { + cachedObj, err := fedobjectadapters.GetFromLister(c.fedObjectInformer.Lister(), c.clusterFedObjectInformer.Lister(), + qualifiedName.Namespace, qualifiedName.Name) + if err != nil && !apierrors.IsNotFound(err) { return nil, err } - if !exist { + + if apierrors.IsNotFound(err) { return nil, nil } - return cachedObj.(*unstructured.Unstructured).DeepCopy(), nil + + return cachedObj.DeepCopyGenericFederatedObject(), nil } diff --git a/pkg/controllers/override/util.go b/pkg/controllers/override/util.go index e9b44ead..cea0b9f7 100644 --- a/pkg/controllers/override/util.go +++ b/pkg/controllers/override/util.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -21,13 +20,12 @@ import ( "encoding/json" "fmt" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" + fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/clusterselector" ) @@ -44,10 +42,10 @@ that match the obj in the stores. Returns the policy if found, whether a recheck is needed on error, and encountered error if any. */ func lookForMatchedPolicies( - obj *unstructured.Unstructured, + obj fedcorev1a1.GenericFederatedObject, isNamespaced bool, - overridePolicyStore cache.Store, - clusterOverridePolicyStore cache.Store, + overridePolicyStore fedcorev1a1listers.OverridePolicyLister, + clusterOverridePolicyStore fedcorev1a1listers.ClusterOverridePolicyLister, ) ([]fedcorev1a1.GenericOverridePolicy, bool, error) { policies := make([]fedcorev1a1.GenericOverridePolicy, 0) @@ -59,17 +57,14 @@ func lookForMatchedPolicies( return nil, false, fmt.Errorf("policy name cannot be empty") } - matchedPolicyObj, exists, err := clusterOverridePolicyStore.GetByKey(clusterPolicyName) - if err != nil { + matchedPolicy, err := clusterOverridePolicyStore.Get(clusterPolicyName) + if err != nil && !errors.IsNotFound(err) { return nil, true, err } - if !exists { + if errors.IsNotFound(err) { return nil, false, fmt.Errorf("ClusterOverridePolicy %s not found", clusterPolicyName) } - matchedPolicy, ok := matchedPolicyObj.(*fedcorev1a1.ClusterOverridePolicy) - if !ok { - return nil, false, fmt.Errorf("object retrieved from store is not a ClusterOverridePolicy") - } + policies = append(policies, matchedPolicy) } @@ -79,17 +74,12 @@ func lookForMatchedPolicies( return nil, false, fmt.Errorf("policy name cannot be empty") } - key := obj.GetNamespace() + "/" + policyName - matchedPolicyObj, exists, err := overridePolicyStore.GetByKey(key) - if err != nil { + matchedPolicy, err := overridePolicyStore.OverridePolicies(obj.GetNamespace()).Get(policyName) + if err != nil && !errors.IsNotFound(err) { return nil, true, err } - if !exists { - return nil, false, fmt.Errorf("OverridePolicy %s not found", key) - } - matchedPolicy, ok := matchedPolicyObj.(*fedcorev1a1.OverridePolicy) - if !ok { - return nil, false, fmt.Errorf("object retrieved from 
store is not an OverridePolicy") + if errors.IsNotFound(err) { + return nil, false, fmt.Errorf("OverridePolicy %s/%s not found", matchedPolicy.Namespace, matchedPolicy.Name) } policies = append(policies, matchedPolicy) } @@ -104,7 +94,7 @@ func parseOverrides( overridesMap := make(util.OverridesMap) for _, cluster := range clusters { - patches := make(fedtypesv1a1.OverridePatches, 0) + patches := make(fedcorev1a1.OverridePatches, 0) spec := policy.GetSpec() for i, rule := range spec.OverrideRules { @@ -223,8 +213,8 @@ func isClusterMatchedByClusterAffinity( func policyJsonPatchOverriderToOverridePatch( overrider *fedcorev1a1.JsonPatchOverrider, -) (*fedtypesv1a1.OverridePatch, error) { - overridePatch := &fedtypesv1a1.OverridePatch{ +) (*fedcorev1a1.OverridePatch, error) { + overridePatch := &fedcorev1a1.OverridePatch{ Op: overrider.Operator, Path: overrider.Path, } diff --git a/pkg/controllers/util/clusterselector/util.go b/pkg/controllers/util/clusterselector/util.go index fa6a7bcf..5f3e2f2a 100644 --- a/pkg/controllers/util/clusterselector/util.go +++ b/pkg/controllers/util/clusterselector/util.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/meta.go b/pkg/controllers/util/meta.go index a2feb5bd..eccfa583 100644 --- a/pkg/controllers/util/meta.go +++ b/pkg/controllers/util/meta.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2016 The Kubernetes Authors. diff --git a/pkg/controllers/util/overrides.go b/pkg/controllers/util/overrides.go index dcf65626..76a2cbc4 100644 --- a/pkg/controllers/util/overrides.go +++ b/pkg/controllers/util/overrides.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. @@ -30,8 +29,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) // Namespace and name may not be overridden since these fields are the @@ -53,41 +51,23 @@ var invalidPaths = sets.NewString( ) // Mapping of clusterName to overrides for the cluster -type OverridesMap map[string]fedtypesv1a1.OverridePatches - -func UnmarshalGenericOverrides(uns *unstructured.Unstructured) (*fedtypesv1a1.GenericObjectWithOverrides, error) { - obj := &fedtypesv1a1.GenericObjectWithOverrides{} - err := UnstructuredToInterface(uns, obj) - if err != nil { - return nil, err - } - return obj, nil -} +type OverridesMap map[string]fedcorev1a1.OverridePatches // GetOverrides returns a map of overrides populated from the given // unstructured object. 
-func GetOverrides(rawObj *unstructured.Unstructured, controller string) (OverridesMap, error) { +func GetOverrides(federatedObj fedcorev1a1.GenericFederatedObject, controller string) (OverridesMap, error) { overridesMap := make(OverridesMap) - if rawObj == nil { - return overridesMap, nil - } - - overrideObj, err := UnmarshalGenericOverrides(rawObj) - if err != nil { - return nil, err - } - - if overrideObj.Spec == nil || overrideObj.Spec.Overrides == nil { + if federatedObj == nil || federatedObj.GetSpec().Overrides == nil { // No overrides defined for the federated type return overridesMap, nil } - overrides := overrideObj.Spec.Overrides - var clusterOverrides []fedtypesv1a1.ClusterOverride + overrides := federatedObj.GetSpec().Overrides + var clusterOverrides []fedcorev1a1.ClusterReferenceWithPatches for i := range overrides { if overrides[i].Controller == controller { - clusterOverrides = overrides[i].Clusters + clusterOverrides = overrides[i].Override break } } @@ -97,7 +77,7 @@ func GetOverrides(rawObj *unstructured.Unstructured, controller string) (Overrid } for _, overrideItem := range clusterOverrides { - clusterName := overrideItem.ClusterName + clusterName := overrideItem.Cluster if _, ok := overridesMap[clusterName]; ok { return nil, errors.Errorf("cluster %q appears more than once", clusterName) } @@ -118,20 +98,15 @@ func GetOverrides(rawObj *unstructured.Unstructured, controller string) (Overrid // object from the provided overrides map. // // This function takes ownership of the `overridesMap` and may mutate it arbitrarily. -func SetOverrides(uns *unstructured.Unstructured, controller string, overridesMap OverridesMap) error { +func SetOverrides(federatedObj fedcorev1a1.GenericFederatedObject, controller string, overridesMap OverridesMap) error { for clusterName, clusterOverrides := range overridesMap { if len(clusterOverrides) == 0 { delete(overridesMap, clusterName) } } - overrideObj, err := UnmarshalGenericOverrides(uns) - if err != nil { - return err - } - index := -1 - for i, overrides := range overrideObj.Spec.Overrides { + for i, overrides := range federatedObj.GetSpec().Overrides { if overrides.Controller == controller { index = i break @@ -141,18 +116,18 @@ func SetOverrides(uns *unstructured.Unstructured, controller string, overridesMa if len(overridesMap) == 0 { // delete index if index != -1 { - overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides[:index], overrideObj.Spec.Overrides[(index+1):]...) + federatedObj.GetSpec().Overrides = append(federatedObj.GetSpec().Overrides[:index], federatedObj.GetSpec().Overrides[(index+1):]...) 
} } else { if index == -1 { - index = len(overrideObj.Spec.Overrides) - overrideObj.Spec.Overrides = append(overrideObj.Spec.Overrides, fedtypesv1a1.ControllerOverride{ + index = len(federatedObj.GetSpec().Overrides) + federatedObj.GetSpec().Overrides = append(federatedObj.GetSpec().Overrides, fedcorev1a1.OverrideWithController{ Controller: controller, }) } - overrides := &overrideObj.Spec.Overrides[index] - overrides.Clusters = nil + overrides := &federatedObj.GetSpec().Overrides[index] + overrides.Override = nil // Write in ascending order of cluster names for better readability clusterNames := make([]string, 0, len(overridesMap)) @@ -162,19 +137,14 @@ func SetOverrides(uns *unstructured.Unstructured, controller string, overridesMa sort.Strings(clusterNames) for _, clusterName := range clusterNames { clusterOverrides := overridesMap[clusterName] - overrides.Clusters = append(overrides.Clusters, fedtypesv1a1.ClusterOverride{ - ClusterName: clusterName, - Patches: clusterOverrides, + overrides.Override = append(overrides.Override, fedcorev1a1.ClusterReferenceWithPatches{ + Cluster: clusterName, + Patches: clusterOverrides, }) } } - overridesUns, err := InterfaceToUnstructured(overrideObj.Spec.Overrides) - if err != nil { - return err - } - - return unstructured.SetNestedField(uns.Object, overridesUns, common.OverridesPath...) + return nil } // UnstructuredToInterface converts an unstructured object to the @@ -201,7 +171,7 @@ func InterfaceToUnstructured(obj interface{}) (ret interface{}, err error) { } // ApplyJsonPatch applies the override on to the given unstructured object. -func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedtypesv1a1.OverridePatches) error { +func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedcorev1a1.OverridePatches) error { // TODO: Do the defaulting of "op" field to "replace" in API defaulting for i, overrideItem := range overrides { if overrideItem.Op == "" { From 09bd4de9aa4acc162eb6a43e14a5e475766cc1d8 Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Mon, 24 Jul 2023 10:56:07 +0800 Subject: [PATCH 086/173] refactor: namespace auto propagation controller --- .../app/controllermanager.go | 20 +- cmd/controller-manager/app/core.go | 27 ++ pkg/controllers/nsautoprop/controller.go | 289 +++++++++--------- pkg/controllers/util/controllerconfig.go | 1 - .../util/federatedclient/podinformer.go | 1 + 5 files changed, 189 insertions(+), 149 deletions(-) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index a9558a01..e4fc1339 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -35,18 +35,20 @@ import ( ) const ( - FederatedClusterControllerName = "cluster" - FederateControllerName = "federate" - MonitorControllerName = "monitor" - FollowerControllerName = "follower" - PolicyRCControllerName = "policyrc" - OverrideControllerName = "overridepolicy" + FederatedClusterControllerName = "cluster" + FederateControllerName = "federate" + MonitorControllerName = "monitor" + FollowerControllerName = "follower" + PolicyRCControllerName = "policyrc" + OverrideControllerName = "overridepolicy" + NamespaceAutoPropagationControllerName = "nsautoprop" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ - FederateControllerName: startFederateController, - PolicyRCControllerName: startPolicyRCController, - OverrideControllerName: startOverridePolicyController, + FederateControllerName: startFederateController, + 
PolicyRCControllerName: startPolicyRCController, + OverrideControllerName: startOverridePolicyController, + NamespaceAutoPropagationControllerName: startNamespaceAutoPropagationController, } var controllersDisabledByDefault = sets.New(MonitorControllerName) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index 4a20d178..9321f86d 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -25,6 +25,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllermanager" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" + "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" ) @@ -89,3 +90,29 @@ func startOverridePolicyController(ctx context.Context, controllerCtx *controlle return overrideController, nil } + +func startNamespaceAutoPropagationController( + ctx context.Context, + controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { + nsAutoPropController, err := nsautoprop.NewNamespaceAutoPropagationController( + controllerCtx.KubeClientset, + controllerCtx.InformerManager, + controllerCtx.FedClientset, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + controllerCtx.KubeInformerFactory.Core().V1().Namespaces(), + controllerCtx.ComponentConfig.NSAutoPropExcludeRegexp, + controllerCtx.FedSystemNamespace, + controllerCtx.Metrics, + klog.Background(), + controllerCtx.WorkerCount, + ) + if err != nil { + return nil, fmt.Errorf("error creating namespace auto propagation controller: %w", err) + } + + go nsAutoPropController.Run(ctx) + + return nsAutoPropController, nil +} diff --git a/pkg/controllers/nsautoprop/controller.go b/pkg/controllers/nsautoprop/controller.go index 56658cb2..52f36efe 100644 --- a/pkg/controllers/nsautoprop/controller.go +++ b/pkg/controllers/nsautoprop/controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
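The rewrite below keeps the controller's decision inputs visible in its constructor: an operator-supplied exclude regexp (ComponentConfig.NSAutoPropExcludeRegexp) and the fed system namespace. A minimal sketch of just that filtering, under the assumption that the real shouldBeAutoPropagated (further down) also inspects the source namespace object itself, which this sketch omits; shouldPropagate and the sample pattern are hypothetical:

```go
package main

import (
	"fmt"
	"regexp"
)

// shouldPropagate applies the two configuration-driven exclusions:
// kubeadmiral's own system namespace and the operator's exclude regexp.
func shouldPropagate(namespace, fedSystemNamespace string, exclude *regexp.Regexp) bool {
	if namespace == fedSystemNamespace {
		return false
	}
	if exclude != nil && exclude.MatchString(namespace) {
		return false
	}
	return true
}

func main() {
	exclude := regexp.MustCompile(`^kube-`) // illustrative pattern
	for _, ns := range []string{"default", "kube-system", "fed-system"} {
		fmt.Println(ns, shouldPropagate(ns, "fed-system", exclude))
	}
}
```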
@@ -31,27 +30,26 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" + corev1informers "k8s.io/client-go/informers/core/v1" kubeclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( @@ -60,6 +58,8 @@ const ( EventReasonNamespaceAutoPropagation = "NamespaceAutoPropagation" ) +var namespaceGVK = corev1.SchemeGroupVersion.WithKind("Namespace") + /* NamespacesAutoPropagationController automatically propagates namespaces to all clusters without requiring a ClusterPropagationPolicy for scheduling. @@ -73,130 +73,135 @@ Note that since both NamespaceAutoPropagationController and global-scheduler set if both are enabled, they will conflict with each other and reconcile indefinitely. 
*/ type Controller struct { - // name of controller - name string - - // FederatedTypeConfig for namespaces - typeConfig *fedcorev1a1.FederatedTypeConfig + fedClient fedclient.Interface - dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory - fedInformerFactory fedinformers.SharedInformerFactory + informerManager informermanager.InformerManager + namespaceInformer corev1informers.NamespaceInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer + clusterInformer fedcorev1a1informers.FederatedClusterInformer - // Informer for FederatedCluster - clusterInformer fedcorev1a1informers.FederatedClusterInformer - // Informer for FederatedNamespace - fedNamespaceInformer informers.GenericInformer - // Client for FederatedNamespace - fedNamespaceClient dynamic.NamespaceableResourceInterface - - worker worker.ReconcileWorker + worker worker.ReconcileWorker[common.QualifiedName] eventRecorder record.EventRecorder - fedSystemNamespace string excludeRegexp *regexp.Regexp + fedSystemNamespace string + logger klog.Logger metrics stats.Metrics } -func StartController( - controllerConfig *util.ControllerConfig, - stopChan <-chan struct{}, - typeConfig *fedcorev1a1.FederatedTypeConfig, - kubeClient kubernetes.Interface, - dynamicClient dynamic.Interface, - dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, - fedInformerFactory fedinformers.SharedInformerFactory, -) error { - controller, err := newController( - controllerConfig, - typeConfig, - kubeClient, - dynamicClient, - fedInformerFactory, - dynamicInformerFactory, - ) - if err != nil { - return err - } - klog.V(4).Infof("Starting namespace auto propagation controller") - go controller.Run(stopChan) - return nil +func (c *Controller) IsControllerReady() bool { + return c.HasSynced() } -func newController( - controllerConfig *util.ControllerConfig, - typeConfig *fedcorev1a1.FederatedTypeConfig, +func NewNamespaceAutoPropagationController( kubeClient kubeclient.Interface, - dynamicClient dynamic.Interface, - fedInformerFactory fedinformers.SharedInformerFactory, - dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, + informerManager informermanager.InformerManager, + fedClient fedclient.Interface, + clusterInformer fedcorev1a1informers.FederatedClusterInformer, + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, + namespaceInformer corev1informers.NamespaceInformer, + nsExcludeRegexp *regexp.Regexp, + fedSystemNamespace string, + metrics stats.Metrics, + logger klog.Logger, + workerCount int, ) (*Controller, error) { - userAgent := NamespaceAutoPropagationControllerName - if !typeConfig.IsNamespace() { - return nil, fmt.Errorf("%s expects a FederatedTypeConfig for namespaces", userAgent) - } - - federatedNamespaceApiResource := typeConfig.GetFederatedType() - fedNamespaceGVR := schemautil.APIResourceToGVR(&federatedNamespaceApiResource) c := &Controller{ - name: userAgent, - typeConfig: typeConfig, - eventRecorder: eventsink.NewDefederatingRecorderMux(kubeClient, userAgent, 4), - dynamicInformerFactory: dynamicInformerFactory, - fedInformerFactory: fedInformerFactory, - fedSystemNamespace: controllerConfig.FedSystemNamespace, - excludeRegexp: controllerConfig.NamespaceAutoPropagationExcludeRegexp, - metrics: controllerConfig.Metrics, - fedNamespaceClient: dynamicClient.Resource(fedNamespaceGVR), - clusterInformer: fedInformerFactory.Core().V1alpha1().FederatedClusters(), - fedNamespaceInformer: dynamicInformerFactory.ForResource(fedNamespaceGVR), + 
fedClient: fedClient, + + informerManager: informerManager, + clusterFedObjectInformer: clusterFedObjectInformer, + clusterInformer: clusterInformer, + namespaceInformer: namespaceInformer, + + excludeRegexp: nsExcludeRegexp, + fedSystemNamespace: fedSystemNamespace, + + eventRecorder: eventsink.NewDefederatingRecorderMux(kubeClient, NamespaceAutoPropagationControllerName, 4), + metrics: metrics, + logger: logger.WithValues("controller", NamespaceAutoPropagationControllerName), } - c.worker = worker.NewReconcileWorker( + c.worker = worker.NewReconcileWorker[common.QualifiedName]( + NamespaceAutoPropagationControllerName, + nil, c.reconcile, worker.RateLimiterOptions{}, - controllerConfig.WorkerCount, - controllerConfig.Metrics, - delayingdeliver.NewMetricTags(userAgent, federatedNamespaceApiResource.Kind), + workerCount, + metrics, ) - enqueueObj := c.worker.EnqueueObject - c.fedNamespaceInformer.Informer(). - AddEventHandlerWithResyncPeriod(util.NewTriggerOnAllChanges(enqueueObj), util.NoResyncPeriod) + + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandlerWithResyncPeriod( + eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { + fedObj := o.(*fedcorev1a1.ClusterFederatedObject) + logger := c.logger.WithValues("cluster-federated-object", common.NewQualifiedName(fedObj)) + + srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured() + if err != nil { + logger.Error(err, "Failed to get source object's metadata from ClusterFederatedObject") + return + } + + if srcMeta.GetKind() != common.NamespaceKind || !c.shouldBeAutoPropagated(srcMeta) { + return + } + + c.worker.Enqueue(common.QualifiedName{Name: fedObj.GetName()}) + }), util.NoResyncPeriod); err != nil { + return nil, err + } reconcileAll := func() { - for _, fns := range c.fedNamespaceInformer.Informer().GetStore().List() { - enqueueObj(fns.(runtime.Object)) + typeConfig, exists := c.informerManager.GetResourceFTC(namespaceGVK) + if !exists { + c.logger.Error(nil, "Namespace ftc does not exist") + return + } + + allNamespaces, err := c.namespaceInformer.Lister().List(labels.Everything()) + if err != nil { + c.logger.Error(err, "Failed to list all namespaces") + return + } + + for _, ns := range allNamespaces { + c.worker.Enqueue(common.QualifiedName{Name: naming.GenerateFederatedObjectName(ns.Name, typeConfig.Name)}) } } - c.clusterInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ + + if _, err := c.clusterInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { reconcileAll() }, DeleteFunc: func(obj interface{}) { reconcileAll() }, - }, util.NoResyncPeriod) + }, util.NoResyncPeriod); err != nil { + return nil, err + } + return c, nil } -func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result { - key := qualifiedName.String() +func (c *Controller) reconcile(ctx context.Context, qualifiedName common.QualifiedName) worker.Result { + ctx, keyedLogger := logging.InjectLoggerValues(ctx, "federated-name", qualifiedName.String()) c.metrics.Rate("namespace-auto-propagation-controller.throughput", 1) - klog.V(4).Infof("namespace auto propagation controller starting to reconcile %v", key) + keyedLogger.V(3).Info("Starting to reconcile") startTime := time.Now() defer func() { c.metrics.Duration("namespace-auto-propagation-controller.latency", startTime) - klog.V(4). 
- Infof("namespace auto propagation controller finished reconciling %v (duration: %v)", key, time.Since(startTime)) + keyedLogger.WithValues("duration", time.Since(startTime)).V(3).Info("Finished reconciling") }() - fedNamespace, err := c.getFederatedObject(qualifiedName) - if err != nil { - utilruntime.HandleError(err) + fedNamespace, err := c.clusterFedObjectInformer.Lister().Get(qualifiedName.Name) + if err != nil && !apierrors.IsNotFound(err) { + keyedLogger.Error(err, "Failed to get federated namespace") return worker.StatusError } - if fedNamespace == nil || fedNamespace.GetDeletionTimestamp() != nil { + + if apierrors.IsNotFound(err) || fedNamespace.GetDeletionTimestamp() != nil { return worker.StatusAllOK } @@ -204,31 +209,43 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result fedNamespace, PrefixedNamespaceAutoPropagationControllerName, ); err != nil { - utilruntime.HandleError(fmt.Errorf("failed to check controller dependencies for %q: %w", key, err)) + keyedLogger.Error(err, "Failed to get pending controllers") return worker.StatusError } else if !ok { return worker.StatusAllOK } - if !c.shouldBeAutoPropagated(fedNamespace) { + typeConfig, exists := c.informerManager.GetResourceFTC(namespaceGVK) + if !exists { + keyedLogger.Error(nil, "Namespace ftc does not exist") + return worker.StatusError + } + + srcMeta, err := fedNamespace.Spec.GetTemplateAsUnstructured() + if err != nil { + keyedLogger.Error(err, "Failed to get source object's metadata from ClusterFederatedObject") + return worker.StatusError + } + + if !c.shouldBeAutoPropagated(srcMeta) { updated, err := pendingcontrollers.UpdatePendingControllers( fedNamespace, PrefixedNamespaceAutoPropagationControllerName, false, - c.typeConfig.GetControllers(), + typeConfig.GetControllers(), ) if err != nil { - utilruntime.HandleError(err) + keyedLogger.Error(err, "Failed to set pending controllers") return worker.StatusError } if updated { - _, err = c.fedNamespaceClient.Update(context.TODO(), fedNamespace, metav1.UpdateOptions{}) + _, err = c.fedClient.CoreV1alpha1().ClusterFederatedObjects().Update(ctx, fedNamespace, metav1.UpdateOptions{}) if err != nil { if apierrors.IsConflict(err) { return worker.StatusConflict } - utilruntime.HandleError(err) + keyedLogger.Error(err, "Failed to update cluster federated object") return worker.StatusError } } @@ -240,23 +257,17 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result // Set placement to propagate to all clusters clusters, err := c.clusterInformer.Lister().List(labels.Everything()) if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to list from cluster store: %w", err)) + keyedLogger.Error(err, "Failed to list federated clusters") return worker.StatusError } - clusterNames := make(map[string]struct{}, len(clusters)) - for _, cluster := range clusters { - clusterNames[cluster.Name] = struct{}{} - } - isDirty, err := util.SetPlacementClusterNames( - fedNamespace, - PrefixedNamespaceAutoPropagationControllerName, - clusterNames, - ) - if err != nil { - utilruntime.HandleError(err) - return worker.StatusError + clusterNames := make([]string, 0, len(clusters)) + for _, clusterName := range clusters { + clusterNames = append(clusterNames, clusterName.Name) } + + isDirty := fedNamespace.Spec.SetControllerPlacement(PrefixedNamespaceAutoPropagationControllerName, clusterNames) + needsUpdate = needsUpdate || isDirty // Set internal versions of the annotations so they do not get overridden by federate controller @@ 
-290,7 +301,7 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result fedNamespace, PrefixedNamespaceAutoPropagationControllerName, needsUpdate, - c.typeConfig.GetControllers(), + typeConfig.GetControllers(), ) if err != nil { utilruntime.HandleError(err) @@ -302,19 +313,19 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) worker.Result return worker.StatusAllOK } - _, err = c.fedNamespaceClient.Update(context.TODO(), fedNamespace, metav1.UpdateOptions{}) + _, err = c.fedClient.CoreV1alpha1().ClusterFederatedObjects().Update(ctx, fedNamespace, metav1.UpdateOptions{}) if err != nil { if apierrors.IsConflict(err) { return worker.StatusConflict } c.eventRecorder.Eventf(fedNamespace, corev1.EventTypeWarning, EventReasonNamespaceAutoPropagation, - "failed to update %s %q for auto propagation, err: %v", - fedNamespace.GetKind(), fedNamespace.GetName(), err) + "failed to update %s for auto propagation, err: %v", + fedNamespace.GetName(), err) return worker.StatusError } c.eventRecorder.Eventf(fedNamespace, corev1.EventTypeNormal, EventReasonNamespaceAutoPropagation, - "updated %s %q for auto propagation", fedNamespace.GetKind(), fedNamespace.GetName()) + "updated %s for auto propagation", fedNamespace.GetName()) return worker.StatusAllOK } @@ -344,38 +355,38 @@ func (c *Controller) shouldBeAutoPropagated(fedNamespace *unstructured.Unstructu return true } -func (c *Controller) ensureAnnotation(fedNamespace *unstructured.Unstructured, key, value string) (bool, error) { +func (c *Controller) ensureAnnotation(fedNamespace *fedcorev1a1.ClusterFederatedObject, key, value string) (bool, error) { needsUpdate, err := annotationutil.AddAnnotation(fedNamespace, key, value) if err != nil { return false, fmt.Errorf( - "failed to add %s annotation to %s %q, err: %w", - key, fedNamespace.GetKind(), fedNamespace.GetName(), err) + "failed to add %s annotation to %s, err: %w", + key, fedNamespace.GetName(), err) } return needsUpdate, nil } -func (c *Controller) Run(stopChan <-chan struct{}) { - c.dynamicInformerFactory.Start(stopChan) - c.fedInformerFactory.Start(stopChan) - if !cache.WaitForNamedCacheSync(c.name, stopChan, c.HasSynced) { +func (c *Controller) Run(ctx context.Context) { + ctx, logger := logging.InjectLogger(ctx, c.logger) + + logger.Info("Starting controller") + defer logger.Info("Stopping controller") + + go c.namespaceInformer.Informer().Run(ctx.Done()) + + if !cache.WaitForNamedCacheSync(NamespaceAutoPropagationControllerName, ctx.Done(), c.HasSynced) { + logger.Error(nil, "Timed out waiting for caches to sync") return } - c.worker.Run(stopChan) + + logger.Info("Caches are synced") + c.worker.Run(ctx) + <-ctx.Done() } func (c *Controller) HasSynced() bool { return c.clusterInformer.Informer().HasSynced() && - c.fedNamespaceInformer.Informer().HasSynced() -} - -func (c *Controller) getFederatedObject(qualifiedName common.QualifiedName) (*unstructured.Unstructured, error) { - cachedObj, err := c.fedNamespaceInformer.Lister().Get(qualifiedName.String()) - if err != nil && !apierrors.IsNotFound(err) { - return nil, err - } - if err != nil { - return nil, nil - } - return cachedObj.(*unstructured.Unstructured).DeepCopy(), nil + c.clusterFedObjectInformer.Informer().HasSynced() && + c.namespaceInformer.Informer().HasSynced() && + c.informerManager.HasSynced() } diff --git a/pkg/controllers/util/controllerconfig.go b/pkg/controllers/util/controllerconfig.go index c13f92fb..88fea409 100644 --- a/pkg/controllers/util/controllerconfig.go +++ 
b/pkg/controllers/util/controllerconfig.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/util/federatedclient/podinformer.go b/pkg/controllers/util/federatedclient/podinformer.go index b6fc61a1..03e7a030 100644 --- a/pkg/controllers/util/federatedclient/podinformer.go +++ b/pkg/controllers/util/federatedclient/podinformer.go @@ -1,4 +1,5 @@ //go:build exclude + /* Copyright 2023 The KubeAdmiral Authors. From 97762c04f1c3520c7f17a6f264d426070387d4d0 Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Wed, 19 Jul 2023 20:02:36 +0800 Subject: [PATCH 087/173] refactor: status controller --- .../app/controllermanager.go | 2 + cmd/controller-manager/app/core.go | 40 +- cmd/controller-manager/app/options/options.go | 14 +- cmd/controller-manager/app/util.go | 4 +- .../v1alpha1/extensions_collectedstatus.go | 39 ++ .../extensions_federatedtypeconfig.go | 4 + pkg/apis/core/v1alpha1/interface.go | 7 + pkg/apis/core/v1alpha1/register.go | 2 + pkg/controllers/context/context.go | 1 + pkg/controllers/status/controller.go | 621 ++++++++++-------- pkg/controllers/util/federatedstatus.go | 1 - pkg/util/collectedstatusadapters/adapter.go | 98 +++ .../federatedinformermanager.go | 70 ++ pkg/util/informermanager/interface.go | 13 + 14 files changed, 632 insertions(+), 284 deletions(-) create mode 100644 pkg/apis/core/v1alpha1/extensions_collectedstatus.go create mode 100644 pkg/util/collectedstatusadapters/adapter.go diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index e4fc1339..2300a2b1 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -42,6 +42,7 @@ const ( PolicyRCControllerName = "policyrc" OverrideControllerName = "overridepolicy" NamespaceAutoPropagationControllerName = "nsautoprop" + StatusControllerName = "status" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ @@ -49,6 +50,7 @@ var knownControllers = map[string]controllermanager.StartControllerFunc{ PolicyRCControllerName: startPolicyRCController, OverrideControllerName: startOverridePolicyController, NamespaceAutoPropagationControllerName: startNamespaceAutoPropagationController, + StatusControllerName: startStatusController, } var controllersDisabledByDefault = sets.New(MonitorControllerName) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index 9321f86d..ccc98b00 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -28,6 +28,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" + "github.com/kubewharf/kubeadmiral/pkg/controllers/status" ) func startFederateController( @@ -55,7 +56,10 @@ func startFederateController( return federateController, nil } -func startPolicyRCController(ctx context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) { +func startPolicyRCController( + ctx context.Context, + controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { policyRCController, err := policyrc.NewPolicyRCController( controllerCtx.RestConfig, controllerCtx.FedInformerFactory, @@ -72,7 +76,10 @@ func startPolicyRCController(ctx context.Context, controllerCtx *controllerconte return policyRCController, nil } -func startOverridePolicyController(ctx 
context.Context, controllerCtx *controllercontext.Context) (controllermanager.Controller, error) {
+func startOverridePolicyController(
+	ctx context.Context,
+	controllerCtx *controllercontext.Context,
+) (controllermanager.Controller, error) {
 	overrideController, err := override.NewOverridePolicyController(
 		controllerCtx.KubeClientset,
 		controllerCtx.FedClientset,
@@ -116,3 +123,32 @@ func startNamespaceAutoPropagationController(
 
 	return nsAutoPropController, nil
 }
+
+func startStatusController(
+	ctx context.Context,
+	controllerCtx *controllercontext.Context,
+) (controllermanager.Controller, error) {
+	statusController, err := status.NewStatusController(
+		controllerCtx.KubeClientset,
+		controllerCtx.FedClientset,
+		controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedObjects(),
+		controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(),
+		controllerCtx.FedInformerFactory.Core().V1alpha1().CollectedStatuses(),
+		controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterCollectedStatuses(),
+		controllerCtx.InformerManager,
+		controllerCtx.FederatedInformerManager,
+		controllerCtx.ClusterAvailableDelay,
+		controllerCtx.ClusterUnavailableDelay,
+		controllerCtx.ComponentConfig.MemberObjectEnqueueDelay,
+		klog.Background(),
+		controllerCtx.WorkerCount,
+		controllerCtx.Metrics,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("error creating status controller: %w", err)
+	}
+
+	go statusController.Run(ctx)
+
+	return statusController, nil
+}
diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go
index 692bae10..0c09bdc7 100644
--- a/cmd/controller-manager/app/options/options.go
+++ b/cmd/controller-manager/app/options/options.go
@@ -50,9 +50,9 @@ type Options struct {
 	LogVerbosity  int
 	KlogVerbosity int
 
-	NSAutoPropExcludeRegexp string
-	CreateCRDsForFTCs       bool
-	ClusterJoinTimeout      time.Duration
+	NSAutoPropExcludeRegexp  string
+	ClusterJoinTimeout       time.Duration
+	MemberObjectEnqueueDelay time.Duration
 
 	MaxPodListers    int64
 	EnablePodPruning bool
@@ -104,7 +104,6 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers []string, disabl
 		"",
 		"If non-empty, namespaces that match this Go regular expression will be excluded from auto propagation.",
 	)
-	flags.BoolVar(&o.CreateCRDsForFTCs, "create-crds-for-ftcs", false, "Generate CRDs for federated types automatically.")
 	flags.DurationVar(
 		&o.ClusterJoinTimeout,
 		"cluster-join-timeout",
@@ -112,6 +111,13 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers []string, disabl
 		"The maximum amount of time to wait for a new cluster to join the federation before timing out.",
 	)
 
+	flags.DurationVar(
+		&o.MemberObjectEnqueueDelay,
+		"member-object-enqueue-delay",
+		time.Second*5,
+		"The time to wait before enqueuing an object from a member cluster.",
+	)
+
 	flags.Int64Var(&o.MaxPodListers, "max-pod-listers", 0, "The maximum number of concurrent pod listing requests to member clusters. "+
 		"A non-positive number means unlimited, but may increase the instantaneous memory usage.")
 	flags.BoolVar(&o.EnablePodPruning, "enable-pod-pruning", false, "Enable pod pruning for pod informer. 
"+ diff --git a/cmd/controller-manager/app/util.go b/cmd/controller-manager/app/util.go index 87bab5ef..41958dfc 100644 --- a/cmd/controller-manager/app/util.go +++ b/cmd/controller-manager/app/util.go @@ -161,8 +161,8 @@ func createControllerContext(opts *options.Options) (*controllercontext.Context, func getComponentConfig(opts *options.Options) (*controllercontext.ComponentConfig, error) { componentConfig := &controllercontext.ComponentConfig{ - FederatedTypeConfigCreateCRDsForFTCs: opts.CreateCRDsForFTCs, - ClusterJoinTimeout: opts.ClusterJoinTimeout, + ClusterJoinTimeout: opts.ClusterJoinTimeout, + MemberObjectEnqueueDelay: opts.MemberObjectEnqueueDelay, } if opts.NSAutoPropExcludeRegexp != "" { diff --git a/pkg/apis/core/v1alpha1/extensions_collectedstatus.go b/pkg/apis/core/v1alpha1/extensions_collectedstatus.go new file mode 100644 index 00000000..c5434c2c --- /dev/null +++ b/pkg/apis/core/v1alpha1/extensions_collectedstatus.go @@ -0,0 +1,39 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Implementations for GenericCollectedStatusObject + +func (cs *CollectedStatus) GetGenericCollectedStatus() *GenericCollectedStatus { + return &cs.GenericCollectedStatus +} + +func (cs *CollectedStatus) GetLastUpdateTime() *metav1.Time { + return &cs.LastUpdateTime +} + +func (ccs *ClusterCollectedStatus) GetGenericCollectedStatus() *GenericCollectedStatus { + return &ccs.GenericCollectedStatus +} + +func (ccs *ClusterCollectedStatus) GetLastUpdateTime() *metav1.Time { + return &ccs.LastUpdateTime +} diff --git a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go index cdeb2d3d..dbecfe7b 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go @@ -92,6 +92,10 @@ func (f *FederatedTypeConfig) IsNamespace() bool { return f.Name == NamespaceName } +func (f *FederatedTypeConfig) IsStatusCollectionEnabled() bool { + return f.Spec.StatusCollection != nil && f.Spec.StatusCollection.Enabled +} + func (a *APIResource) Namespaced() bool { return a.Scope == apiextv1beta1.NamespaceScoped } diff --git a/pkg/apis/core/v1alpha1/interface.go b/pkg/apis/core/v1alpha1/interface.go index fdcef995..7a3512e1 100644 --- a/pkg/apis/core/v1alpha1/interface.go +++ b/pkg/apis/core/v1alpha1/interface.go @@ -49,3 +49,10 @@ type GenericFederatedObject interface { GetStatus() *GenericFederatedObjectStatus DeepCopyGenericFederatedObject() GenericFederatedObject } + +type GenericCollectedStatusObject interface { + metav1.Object + pkgruntime.Object + GetGenericCollectedStatus() *GenericCollectedStatus + GetLastUpdateTime() *metav1.Time +} diff --git a/pkg/apis/core/v1alpha1/register.go b/pkg/apis/core/v1alpha1/register.go index 41cde200..74c71ea7 100644 --- a/pkg/apis/core/v1alpha1/register.go +++ b/pkg/apis/core/v1alpha1/register.go @@ -59,6 +59,8 @@ func addKnownTypes(scheme 
*runtime.Scheme) error { &ClusterFederatedObjectList{}, &CollectedStatus{}, &CollectedStatusList{}, + &ClusterCollectedStatus{}, + &ClusterCollectedStatusList{}, &FederatedCluster{}, &FederatedClusterList{}, &FederatedTypeConfig{}, diff --git a/pkg/controllers/context/context.go b/pkg/controllers/context/context.go index e38f088d..90d28f89 100644 --- a/pkg/controllers/context/context.go +++ b/pkg/controllers/context/context.go @@ -80,4 +80,5 @@ type ComponentConfig struct { NSAutoPropExcludeRegexp *regexp.Regexp FederatedTypeConfigCreateCRDsForFTCs bool ClusterJoinTimeout time.Duration + MemberObjectEnqueueDelay time.Duration } diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index dc42fb2f..72d771b0 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. @@ -34,28 +33,34 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - kubeclient "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - genericclient "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/annotation" + clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" + "github.com/kubewharf/kubeadmiral/pkg/util/collectedstatusadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( @@ -68,181 +73,168 @@ const ( // StatusController collects the status of resources in member clusters. type StatusController struct { - name string - // For triggering reconciliation of all target resources. This is // used when a new cluster becomes available. 
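 	// The queued items are opaque struct{}{} sentinels: only the fact that
 	// some cluster changed matters, and a full reconcile of all federated
 	// objects is triggered in response.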
clusterQueue workqueue.DelayingInterface - // Informer for resources in member clusters - informer util.FederatedInformer + fedClient fedclient.Interface + fedInformerManager informermanager.FederatedInformerManager + ftcManager informermanager.FederatedTypeConfigManager - // Store for the federated type - federatedStore cache.Store - // Informer for the federated type - federatedController cache.Controller + // Informers for federated objects + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer - // Store for the status of the federated type - statusStore cache.Store - // Informer for the status of the federated type - statusController cache.Controller + // Informers for collected status objects + collectedStatusInformer fedcorev1a1informers.CollectedStatusInformer + clusterCollectedStatusInformer fedcorev1a1informers.ClusterCollectedStatusInformer - worker worker.ReconcileWorker + worker worker.ReconcileWorker[common.QualifiedName] clusterAvailableDelay time.Duration clusterUnavailableDelay time.Duration reconcileOnClusterChangeDelay time.Duration memberObjectEnqueueDelay time.Duration - typeConfig *fedcorev1a1.FederatedTypeConfig - - client genericclient.Client - statusClient util.ResourceClient - - fedNamespace string metrics stats.Metrics logger klog.Logger eventRecorder record.EventRecorder } -// StartStatusController starts a new status controller for a type config -func StartStatusController( - controllerConfig *util.ControllerConfig, - stopChan <-chan struct{}, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) error { - controller, err := newStatusController(controllerConfig, typeConfig) - if err != nil { - return err - } - if controllerConfig.MinimizeLatency { - controller.minimizeLatency() - } - controller.logger.Info("Starting status controller") - controller.Run(stopChan) - return nil +func (s *StatusController) IsControllerReady() bool { + return s.HasSynced() } -// newStatusController returns a new status controller for the federated type -func newStatusController( - controllerConfig *util.ControllerConfig, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) (*StatusController, error) { - federatedAPIResource := typeConfig.GetFederatedType() - statusAPIResource := typeConfig.GetStatusType() - if statusAPIResource == nil { - return nil, errors.Errorf("Status collection is not supported for %q", federatedAPIResource.Kind) - } - userAgent := fmt.Sprintf("%s-federate-status-controller", strings.ToLower(statusAPIResource.Kind)) - configCopy := rest.CopyConfig(controllerConfig.KubeConfig) - rest.AddUserAgent(configCopy, userAgent) - kubeClient, err := kubeclient.NewForConfig(configCopy) - if err != nil { - return nil, err - } - client := genericclient.NewForConfigOrDieWithUserAgent(controllerConfig.KubeConfig, userAgent) +// NewStatusController returns a new status controller for the configuration +func NewStatusController( + kubeClient kubernetes.Interface, + fedClient fedclient.Interface, - federatedTypeClient, err := util.NewResourceClient(controllerConfig.KubeConfig, &federatedAPIResource) - if err != nil { - return nil, err - } + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, + collectedStatusInformer fedcorev1a1informers.CollectedStatusInformer, + clusterCollectedStatusInformer fedcorev1a1informers.ClusterCollectedStatusInformer, - statusClient, err := 
util.NewResourceClient(controllerConfig.KubeConfig, statusAPIResource) - if err != nil { - return nil, err - } + ftcManager informermanager.FederatedTypeConfigManager, + fedInformerManager informermanager.FederatedInformerManager, - logger := klog.LoggerWithValues(klog.Background(), "controller", StatusControllerName, - "ftc", typeConfig.Name, "status-kind", typeConfig.GetStatusType().Kind) + clusterAvailableDelay, clusterUnavailableDelay, memberObjectEnqueueDelay time.Duration, + logger klog.Logger, + workerCount int, + metrics stats.Metrics, +) (*StatusController, error) { s := &StatusController{ - name: userAgent, - clusterAvailableDelay: controllerConfig.ClusterAvailableDelay, - clusterUnavailableDelay: controllerConfig.ClusterUnavailableDelay, - reconcileOnClusterChangeDelay: time.Second * 3, - memberObjectEnqueueDelay: time.Second * 10, - typeConfig: typeConfig, - client: client, - statusClient: statusClient, - fedNamespace: controllerConfig.FedSystemNamespace, - metrics: controllerConfig.Metrics, - logger: logger, - eventRecorder: eventsink.NewDefederatingRecorderMux(kubeClient, StatusControllerName, 6), + fedClient: fedClient, + fedInformerManager: fedInformerManager, + ftcManager: ftcManager, + fedObjectInformer: fedObjectInformer, + clusterFedObjectInformer: clusterFedObjectInformer, + collectedStatusInformer: collectedStatusInformer, + clusterCollectedStatusInformer: clusterCollectedStatusInformer, + clusterAvailableDelay: clusterAvailableDelay, + clusterUnavailableDelay: clusterUnavailableDelay, + reconcileOnClusterChangeDelay: time.Second * 3, + memberObjectEnqueueDelay: memberObjectEnqueueDelay, + metrics: metrics, + logger: logger.WithValues("controller", StatusControllerName), + eventRecorder: eventsink.NewDefederatingRecorderMux(kubeClient, StatusControllerName, 4), } s.worker = worker.NewReconcileWorker( + StatusControllerName, + nil, s.reconcile, worker.RateLimiterOptions{}, - controllerConfig.WorkerCount, - controllerConfig.Metrics, - delayingdeliver.NewMetricTags("status-worker", typeConfig.GetTargetType().Kind), + workerCount, + metrics, ) // Build queue for triggering cluster reconciliations. 
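 	// Identical sentinels still waiting in the queue are deduplicated by the
 	// workqueue, so bursts of cluster events collapse into a single
 	// reconcile-all pass.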
s.clusterQueue = workqueue.NewNamedDelayingQueue("status-controller-cluster-queue") - // Start informers on the resources for the federated type - enqueueObj := s.worker.EnqueueObject + fedObjectHandler := util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + s.enqueueEnableCollectedStatusObject(common.NewQualifiedName(o), 0) + }) - targetNamespace := controllerConfig.TargetNamespace + if _, err := s.fedObjectInformer.Informer().AddEventHandler(fedObjectHandler); err != nil { + return nil, err + } - s.federatedStore, s.federatedController = util.NewResourceInformer( - federatedTypeClient, - targetNamespace, - enqueueObj, - controllerConfig.Metrics, - ) - s.statusStore, s.statusController = util.NewResourceInformer( - statusClient, - targetNamespace, - enqueueObj, - controllerConfig.Metrics, - ) - logger.Info("Creating new FederatedInformer") - - targetAPIResource := typeConfig.GetTargetType() - - // Federated informer for resources in member clusters - s.informer, err = util.NewFederatedInformer( - controllerConfig, - client, - configCopy, - &targetAPIResource, - func(obj pkgruntime.Object) { - qualifiedName := common.NewQualifiedName(obj) - s.worker.EnqueueWithDelay(qualifiedName, s.memberObjectEnqueueDelay) + if _, err := s.clusterFedObjectInformer.Informer().AddEventHandler(fedObjectHandler); err != nil { + return nil, err + } + + if _, err := s.collectedStatusInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + s.worker.Enqueue(common.NewQualifiedName(o)) + })); err != nil { + return nil, err + } + + if _, err := s.clusterCollectedStatusInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + s.worker.Enqueue(common.NewQualifiedName(o)) + })); err != nil { + return nil, err + } + + if err := s.fedInformerManager.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{ + Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { + return lastApplied.IsStatusCollectionEnabled() != latest.IsStatusCollectionEnabled() }, - &util.ClusterLifecycleHandlerFuncs{ - ClusterAvailable: func(cluster *fedcorev1a1.FederatedCluster) { - // When new cluster becomes available process all the target resources again. + Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + if !ftc.IsStatusCollectionEnabled() { + return nil + } + + return eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + obj := o.(*unstructured.Unstructured) + + ftc, exists := s.ftcManager.GetResourceFTC(obj.GroupVersionKind()) + if !exists { + return + } + + federatedName := common.QualifiedName{ + Namespace: obj.GetNamespace(), + Name: naming.GenerateFederatedObjectName(obj.GetName(), ftc.GetName()), + } + s.worker.EnqueueWithDelay(federatedName, s.memberObjectEnqueueDelay) + }) + }, + }); err != nil { + return nil, fmt.Errorf("failed to add event handler generator: %w", err) + } + + if err := s.fedInformerManager.AddClusterEventHandlers( + &informermanager.ClusterEventHandler{ + Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { + // Reconcile all federated objects when cluster becomes available + return oldCluster != nil && newCluster != nil && + !clusterutil.IsClusterReady(&oldCluster.Status) && clusterutil.IsClusterReady(&newCluster.Status) + }, + Callback: func(cluster *fedcorev1a1.FederatedCluster) { s.clusterQueue.AddAfter(struct{}{}, s.clusterAvailableDelay) }, - // When a cluster becomes unavailable process all the target resources again. 
- ClusterUnavailable: func(cluster *fedcorev1a1.FederatedCluster, _ []interface{}) { + }, + &informermanager.ClusterEventHandler{ + Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { + // Reconcile all federated objects when cluster becomes unavailable + return oldCluster != nil && newCluster != nil && + clusterutil.IsClusterReady(&oldCluster.Status) && !clusterutil.IsClusterReady(&newCluster.Status) + }, + Callback: func(cluster *fedcorev1a1.FederatedCluster) { s.clusterQueue.AddAfter(struct{}{}, s.clusterUnavailableDelay) }, }, - ) - if err != nil { - return nil, err + ); err != nil { + return nil, fmt.Errorf("failed to add cluster event handler: %w", err) } return s, nil } -// minimizeLatency reduces delays and timeouts to make the controller more responsive (useful for testing). -func (s *StatusController) minimizeLatency() { - s.clusterAvailableDelay = time.Second - s.clusterUnavailableDelay = time.Second - s.reconcileOnClusterChangeDelay = 20 * time.Millisecond - s.memberObjectEnqueueDelay = 50 * time.Millisecond -} - // Run runs the status controller -func (s *StatusController) Run(stopChan <-chan struct{}) { - go s.federatedController.Run(stopChan) - go s.statusController.Run(stopChan) - s.informer.Start() +func (s *StatusController) Run(ctx context.Context) { go func() { for { _, shutdown := s.clusterQueue.Get() @@ -253,16 +245,16 @@ func (s *StatusController) Run(stopChan <-chan struct{}) { } }() - if !cache.WaitForNamedCacheSync(s.name, stopChan, s.HasSynced) { + if !cache.WaitForNamedCacheSync(StatusControllerName, ctx.Done(), s.HasSynced) { + s.logger.Error(nil, "Timed out waiting for cache sync") return } - - s.worker.Run(stopChan) + s.logger.Info("Caches are synced") + s.worker.Run(ctx) // Ensure all goroutines are cleaned up when the stop channel closes go func() { - <-stopChan - s.informer.Stop() + <-ctx.Done() s.clusterQueue.ShutDown() }() } @@ -270,36 +262,42 @@ func (s *StatusController) Run(stopChan <-chan struct{}) { // Check whether all data stores are in sync. False is returned if any of the informer/stores is not yet // synced with the corresponding api server. func (s *StatusController) HasSynced() bool { - if !s.informer.ClustersSynced() { - s.logger.V(3).Info("Cluster list not synced") - return false - } - if !s.federatedController.HasSynced() { - s.logger.V(3).Info("Federated type not synced") - return false - } - if !s.statusController.HasSynced() { - s.logger.V(3).Info("Status not synced") - return false - } - return true + return s.ftcManager.HasSynced() && + s.fedInformerManager.HasSynced() && + s.fedObjectInformer.Informer().HasSynced() && + s.clusterFedObjectInformer.Informer().HasSynced() && + s.collectedStatusInformer.Informer().HasSynced() && + s.clusterCollectedStatusInformer.Informer().HasSynced() } // The function triggers reconciliation of all target federated resources. 
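 // Both FederatedObjects and ClusterFederatedObjects are visited; objects whose
 // FTC has status collection disabled are filtered out by
 // enqueueEnableCollectedStatusObject before they reach the worker.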
func (s *StatusController) reconcileOnClusterChange() { - for _, obj := range s.federatedStore.List() { - qualifiedName := common.NewQualifiedName(obj.(pkgruntime.Object)) - s.worker.EnqueueWithDelay(qualifiedName, s.reconcileOnClusterChangeDelay) + visitFunc := func(obj fedcorev1a1.GenericFederatedObject) { + s.enqueueEnableCollectedStatusObject(common.NewQualifiedName(obj), s.reconcileOnClusterChangeDelay) + } + + fedObjects, err := s.fedObjectInformer.Lister().List(labels.Everything()) + if err == nil { + for _, obj := range fedObjects { + visitFunc(obj) + } + } else { + s.logger.Error(err, "Failed to list FederatedObjects from lister") + } + + clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) + if err == nil { + for _, obj := range clusterFedObjects { + visitFunc(obj) + } + } else { + s.logger.Error(err, "Failed to list ClusterFederatedObjects from lister") } } -func (s *StatusController) reconcile(qualifiedName common.QualifiedName) (reconcileStatus worker.Result) { - targetType := s.typeConfig.GetTargetType() - targetIsDeployment := schemautil.APIResourceToGVK(&targetType) == appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) - statusKind := s.typeConfig.GetStatusType().Kind - key := qualifiedName.String() - keyedLogger := s.logger.WithValues("object", key) - ctx := klog.NewContext(context.TODO(), keyedLogger) +func (s *StatusController) reconcile(ctx context.Context, qualifiedName common.QualifiedName) (reconcileStatus worker.Result) { + keyedLogger := s.logger.WithValues("federated-name", qualifiedName.String()) + ctx = klog.NewContext(ctx, keyedLogger) s.metrics.Rate("status.throughput", 1) keyedLogger.V(3).Info("Starting reconcile") @@ -310,39 +308,75 @@ func (s *StatusController) reconcile(qualifiedName common.QualifiedName) (reconc V(3).Info("Finished reconcile") }() - fedObject, err := s.objFromCache(s.federatedStore, key) + fedObject, err := fedobjectadapters.GetFromLister( + s.fedObjectInformer.Lister(), + s.clusterFedObjectInformer.Lister(), + qualifiedName.Namespace, qualifiedName.Name, + ) if err != nil { - keyedLogger.Error(err, "Failed to get federated object from cache") - return worker.StatusError + if apierrors.IsNotFound(err) { + return worker.StatusAllOK + } else { + keyedLogger.Error(err, "Failed to get federated object from cache") + return worker.StatusError + } } if fedObject == nil || fedObject.GetDeletionTimestamp() != nil { keyedLogger.V(1).Info("No federated type found, deleting status object") - err = s.statusClient.Resources(qualifiedName.Namespace). 
- Delete(ctx, qualifiedName.Name, metav1.DeleteOptions{}) + err = collectedstatusadapters.Delete(ctx, s.fedClient.CoreV1alpha1(), qualifiedName.Namespace, qualifiedName.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return worker.StatusError } return worker.StatusAllOK } + template, err := fedObject.GetSpec().GetTemplateAsUnstructured() + if err != nil { + keyedLogger.Error(err, "Failed to unmarshal template") + return worker.StatusError + } + + templateGVK := template.GroupVersionKind() + targetIsDeployment := templateGVK == appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) + + templateQualifiedName := common.NewQualifiedName(template) + + typeConfig, exists := s.ftcManager.GetResourceFTC(templateGVK) + if !exists || typeConfig == nil { + keyedLogger.V(3).Info("Resource ftc not found") + return worker.StatusAllOK + } + + if typeConfig.Spec.StatusCollection == nil || !typeConfig.Spec.StatusCollection.Enabled { + keyedLogger.V(3).Info("StatusCollection is not enabled") + return worker.StatusAllOK + } + clusterNames, err := s.clusterNames() if err != nil { keyedLogger.Error(err, "Failed to get cluster list") return worker.Result{RequeueAfter: &s.clusterAvailableDelay} } - clusterStatus := s.clusterStatuses(ctx, fedObject, clusterNames, qualifiedName) + clusterStatuses := s.clusterStatuses(ctx, fedObject, templateQualifiedName, templateGVK, typeConfig, clusterNames) - existingStatus, err := s.objFromCache(s.statusStore, key) - if err != nil { + existingStatus, err := collectedstatusadapters.GetFromLister( + s.collectedStatusInformer.Lister(), + s.clusterCollectedStatusInformer.Lister(), + qualifiedName.Namespace, qualifiedName.Name) + if err != nil && !apierrors.IsNotFound(err) { keyedLogger.Error(err, "Failed to get status from cache") return worker.StatusError } + if apierrors.IsNotFound(err) { + existingStatus = nil + } + var rsDigestsAnnotation string if targetIsDeployment { - latestReplicasetDigests, err := s.latestReplicasetDigests(ctx, clusterNames, qualifiedName) + latestReplicasetDigests, err := s.latestReplicasetDigests(ctx, clusterNames, templateQualifiedName, templateGVK, typeConfig) if err != nil { keyedLogger.Error(err, "Failed to get latest replicaset digests") } else { @@ -366,82 +400,53 @@ func (s *StatusController) reconcile(qualifiedName common.QualifiedName) (reconc } } - resourceGroupVersion := schema.GroupVersion{ - Group: s.typeConfig.GetStatusType().Group, - Version: s.typeConfig.GetStatusType().Version, - } - federatedResource := util.FederatedResource{ - TypeMeta: metav1.TypeMeta{ - Kind: statusKind, - APIVersion: resourceGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: qualifiedName.Name, - Namespace: qualifiedName.Namespace, - // Add ownership of status object to corresponding - // federated object, so that status object is deleted when - // the federated object is deleted. 
- OwnerReferences: []metav1.OwnerReference{{ - APIVersion: fedObject.GetAPIVersion(), - Kind: fedObject.GetKind(), - Name: fedObject.GetName(), - UID: fedObject.GetUID(), - }}, - Labels: fedObject.GetLabels(), - }, - ClusterStatus: clusterStatus, - } + collectedStatus := newCollectedStatusObject(fedObject, clusterStatuses) + if rsDigestsAnnotation != "" { - federatedResource.Annotations = map[string]string{util.LatestReplicasetDigestsAnnotation: rsDigestsAnnotation} + collectedStatus.SetAnnotations(map[string]string{util.LatestReplicasetDigestsAnnotation: rsDigestsAnnotation}) } replicasAnnotationUpdated := false if targetIsDeployment { replicasAnnotationUpdated, err = s.setReplicasAnnotations( ctx, - &federatedResource, + collectedStatus, fedObject, clusterNames, - qualifiedName, + templateQualifiedName, + templateGVK, + typeConfig, ) if err != nil { keyedLogger.Error(err, "Failed to set annotations about replicas") } } - status, err := util.GetUnstructured(federatedResource) - if err != nil { - keyedLogger.Error(err, "Failed to convert to unstructured") - return worker.StatusError - } - if existingStatus == nil { - _, err = s.statusClient.Resources(qualifiedName.Namespace). - Create(context.TODO(), status, metav1.CreateOptions{}) + collectedStatus.GetLastUpdateTime().Time = time.Now() + _, err = collectedstatusadapters.Create(ctx, s.fedClient.CoreV1alpha1(), collectedStatus, metav1.CreateOptions{}) if err != nil { if apierrors.IsAlreadyExists(err) { return worker.StatusConflict } - keyedLogger.Error(err, "Failed to create status object") + keyedLogger.Error(err, "Failed to create collected status object") return worker.StatusError } - } else if !reflect.DeepEqual(existingStatus.Object["clusterStatus"], status.Object["clusterStatus"]) || - !reflect.DeepEqual(status.GetLabels(), existingStatus.GetLabels()) || + } else if !reflect.DeepEqual(existingStatus.GetGenericCollectedStatus().Clusters, collectedStatus.GetGenericCollectedStatus().Clusters) || + !reflect.DeepEqual(collectedStatus.GetLabels(), existingStatus.GetLabels()) || replicasAnnotationUpdated || (rsDigestsAnnotation != "" && !hasRSDigestsAnnotation) { - if status.Object["clusterStatus"] == nil { - status.Object["clusterStatus"] = make([]util.ResourceClusterStatus, 0) - } - existingStatus.Object["clusterStatus"] = status.Object["clusterStatus"] - existingStatus.SetLabels(status.GetLabels()) + collectedStatus.GetLastUpdateTime().Time = time.Now() + existingStatus.GetGenericCollectedStatus().Clusters = collectedStatus.GetGenericCollectedStatus().Clusters + existingStatus.SetLabels(collectedStatus.GetLabels()) anns := existingStatus.GetAnnotations() if anns == nil { anns = make(map[string]string) } - for key, value := range federatedResource.GetAnnotations() { + for key, value := range collectedStatus.GetAnnotations() { anns[key] = value } existingStatus.SetAnnotations(anns) - _, err = s.statusClient.Resources(qualifiedName.Namespace).Update(context.TODO(), existingStatus, metav1.UpdateOptions{}) + _, err = collectedstatusadapters.Update(ctx, s.fedClient.CoreV1alpha1(), existingStatus, metav1.UpdateOptions{}) if err != nil { if apierrors.IsConflict(err) { return worker.StatusConflict @@ -454,33 +459,50 @@ func (s *StatusController) reconcile(qualifiedName common.QualifiedName) (reconc return worker.StatusAllOK } -func (s *StatusController) rawObjFromCache(store cache.Store, key string) (pkgruntime.Object, error) { - cachedObj, exist, err := store.GetByKey(key) +func (s *StatusController) 
enqueueEnableCollectedStatusObject(qualifiedName common.QualifiedName, delay time.Duration) {
+	keyedLogger := s.logger.WithValues("federated-name", qualifiedName.String())
+
+	fedObject, err := fedobjectadapters.GetFromLister(
+		s.fedObjectInformer.Lister(),
+		s.clusterFedObjectInformer.Lister(),
+		qualifiedName.Namespace, qualifiedName.Name,
+	)
 	if err != nil {
-		return nil, fmt.Errorf("failed to query store for %q, err info: %w", key, err)
+		if apierrors.IsNotFound(err) {
+			return
+		} else {
+			keyedLogger.Error(err, "Failed to get federated object from cache")
+			return
+		}
 	}
-	if !exist {
-		return nil, nil
+
+	if fedObject == nil || fedObject.GetDeletionTimestamp() != nil {
+		// enqueue so that the referenced collected status object gets deleted
+		s.worker.Enqueue(qualifiedName)
+		return
 	}
-	return cachedObj.(pkgruntime.Object).DeepCopyObject(), nil
-}
-func (s *StatusController) objFromCache(
-	store cache.Store,
-	key string,
-) (*unstructured.Unstructured, error) {
-	obj, err := s.rawObjFromCache(store, key)
+	templateGVK, err := fedObject.GetSpec().GetTemplateGVK()
 	if err != nil {
-		return nil, err
+		keyedLogger.Error(err, "Failed to get template GVK")
+		return
 	}
-	if obj == nil {
-		return nil, nil
+
+	typeConfig, exists := s.ftcManager.GetResourceFTC(templateGVK)
+	if !exists || typeConfig == nil {
+		keyedLogger.V(3).Info("Resource ftc not found")
+		return
+	}
+
+	if typeConfig.Spec.StatusCollection == nil || !typeConfig.Spec.StatusCollection.Enabled {
+		return
 	}
-	return obj.(*unstructured.Unstructured), nil
+
+	s.worker.EnqueueWithDelay(qualifiedName, delay)
 }
 
 func (s *StatusController) clusterNames() ([]string, error) {
-	clusters, err := s.informer.GetReadyClusters()
+	clusters, err := s.fedInformerManager.GetReadyClusters()
 	if err != nil {
 		return nil, err
 	}
@@ -495,24 +517,27 @@ func (s *StatusController) clusterNames() ([]string, error) {
 
 // clusterStatuses returns the resource status in member clusters.
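 // For each ready cluster, the member object is read through GetClusterObject
 // (cluster informer cache first, direct GET as fallback) and the fields listed
 // in the FTC's spec.statusCollection.fields are copied into a
 // CollectedFieldsWithCluster entry; per-cluster collection errors are
 // aggregated into a single warning event on the federated object.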
func (s *StatusController) clusterStatuses( ctx context.Context, - fedObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, + targetQualifiedName common.QualifiedName, + targetGVK schema.GroupVersionKind, + typeConfig *fedcorev1a1.FederatedTypeConfig, clusterNames []string, - qualifiedName common.QualifiedName, -) []util.ResourceClusterStatus { - clusterStatus := []util.ResourceClusterStatus{} +) []fedcorev1a1.CollectedFieldsWithCluster { + clusterStatus := []fedcorev1a1.CollectedFieldsWithCluster{} keyedLogger := klog.FromContext(ctx) // collect errors during status collection and record them as event - errList := []string{} + var errList []string for _, clusterName := range clusterNames { - resourceClusterStatus := util.ResourceClusterStatus{ClusterName: clusterName} + resourceClusterStatus := fedcorev1a1.CollectedFieldsWithCluster{Cluster: clusterName} - clusterObj, exist, err := util.GetClusterObject( + clusterObj, exist, err := informermanager.GetClusterObject( ctx, - s.informer, + s.ftcManager, + s.fedInformerManager, clusterName, - qualifiedName, - s.typeConfig.GetTargetType(), + targetQualifiedName, + targetGVK, ) if err != nil { keyedLogger.WithValues("cluster-name", clusterName).Error(err, "Failed to get object from cluster") @@ -529,8 +554,8 @@ func (s *StatusController) clusterStatuses( collectedFields := map[string]interface{}{} failedFields := []string{} - if s.typeConfig.Spec.StatusCollection != nil { - for _, field := range s.typeConfig.Spec.StatusCollection.Fields { + if typeConfig.Spec.StatusCollection != nil { + for _, field := range typeConfig.Spec.StatusCollection.Fields { fieldVal, found, err := unstructured.NestedFieldCopy( clusterObj.Object, strings.Split(field, ".")...) @@ -554,7 +579,14 @@ func (s *StatusController) clusterStatuses( } } - resourceClusterStatus.CollectedFields = collectedFields + collectedFieldsBytes, err := json.Marshal(collectedFields) + if err != nil { + keyedLogger.WithValues("cluster-name", clusterName). + Error(err, "Failed to marshal collected fields") + continue + } + + resourceClusterStatus.CollectedFields = apiextensionsv1.JSON{Raw: collectedFieldsBytes} if len(failedFields) > 0 { sort.Slice(failedFields, func(i, j int) bool { return failedFields[i] < failedFields[j] @@ -565,14 +597,16 @@ func (s *StatusController) clusterStatuses( clusterStatus = append(clusterStatus, resourceClusterStatus) } - s.eventRecorder.Eventf( - fedObject, - corev1.EventTypeWarning, EventReasonGetObjectStatusError, - fmt.Sprintf("Failed to get some cluster status, error info: %s", strings.Join(errList, ". ")), - ) + if len(errList) != 0 { + s.eventRecorder.Eventf( + fedObject, + corev1.EventTypeWarning, EventReasonGetObjectStatusError, + fmt.Sprintf("Failed to get some cluster status, error info: %s", strings.Join(errList, ". 
")), + ) + } sort.Slice(clusterStatus, func(i, j int) bool { - return clusterStatus[i].ClusterName < clusterStatus[j].ClusterName + return clusterStatus[i].Cluster < clusterStatus[j].Cluster }) return clusterStatus } @@ -581,20 +615,23 @@ func (s *StatusController) clusterStatuses( func (s *StatusController) latestReplicasetDigests( ctx context.Context, clusterNames []string, - qualifiedName common.QualifiedName, + targetQualifiedName common.QualifiedName, + targetGVK schema.GroupVersionKind, + typeConfig *fedcorev1a1.FederatedTypeConfig, ) ([]util.LatestReplicasetDigest, error) { - key := qualifiedName.String() + key := targetQualifiedName.String() digests := []util.LatestReplicasetDigest{} - targetKind := s.typeConfig.GetTargetType().Kind + targetKind := typeConfig.Spec.SourceType.Kind keyedLogger := klog.FromContext(ctx) for _, clusterName := range clusterNames { - clusterObj, exist, err := util.GetClusterObject( + clusterObj, exist, err := informermanager.GetClusterObject( ctx, - s.informer, + s.ftcManager, + s.fedInformerManager, clusterName, - qualifiedName, - s.typeConfig.GetTargetType(), + targetQualifiedName, + targetGVK, ) if err != nil { return nil, errors.Wrapf(err, "Failed to get %s %q from cluster %q", targetKind, key, clusterName) @@ -627,21 +664,24 @@ func (s *StatusController) latestReplicasetDigests( func (s *StatusController) realUpdatedReplicas( ctx context.Context, clusterNames []string, - qualifiedName common.QualifiedName, + targetQualifiedName common.QualifiedName, + targetGVK schema.GroupVersionKind, + typeConfig *fedcorev1a1.FederatedTypeConfig, revision string, ) (string, error) { - key := qualifiedName.String() + key := targetQualifiedName.String() var updatedReplicas int64 - targetKind := s.typeConfig.GetTargetType().Kind + targetKind := typeConfig.Spec.SourceType.Kind keyedLogger := klog.FromContext(ctx) for _, clusterName := range clusterNames { - clusterObj, exist, err := util.GetClusterObject( + clusterObj, exist, err := informermanager.GetClusterObject( ctx, - s.informer, + s.ftcManager, + s.fedInformerManager, clusterName, - qualifiedName, - s.typeConfig.GetTargetType(), + targetQualifiedName, + targetGVK, ) if err != nil { return "", errors.Wrapf(err, "Failed to get %s %q from cluster %q", targetKind, key, clusterName) @@ -669,23 +709,54 @@ func (s *StatusController) realUpdatedReplicas( func (s *StatusController) setReplicasAnnotations( ctx context.Context, - federatedResource *util.FederatedResource, - fedObject *unstructured.Unstructured, + collectedStatus fedcorev1a1.GenericCollectedStatusObject, + fedObject fedcorev1a1.GenericFederatedObject, clusterNames []string, qualifedName common.QualifiedName, + targetGVK schema.GroupVersionKind, + typeConfig *fedcorev1a1.FederatedTypeConfig, ) (bool, error) { revision, ok := fedObject.GetAnnotations()[common.CurrentRevisionAnnotation] if !ok { return false, nil } - updatedReplicas, err := s.realUpdatedReplicas(ctx, clusterNames, qualifedName, revision) + updatedReplicas, err := s.realUpdatedReplicas(ctx, clusterNames, qualifedName, targetGVK, typeConfig, revision) if err != nil { return false, err } - if federatedResource.Annotations == nil { - federatedResource.Annotations = make(map[string]string) + + collectedStatusAnno := collectedStatus.GetAnnotations() + if collectedStatusAnno == nil { + collectedStatusAnno = make(map[string]string) } - federatedResource.Annotations[util.AggregatedUpdatedReplicas] = updatedReplicas - federatedResource.Annotations[common.CurrentRevisionAnnotation] = revision + 
collectedStatusAnno[util.AggregatedUpdatedReplicas] = updatedReplicas + collectedStatusAnno[common.CurrentRevisionAnnotation] = revision + + collectedStatus.SetAnnotations(collectedStatusAnno) return true, nil } + +func newCollectedStatusObject( + fedObj fedcorev1a1.GenericFederatedObject, + clusterStatus []fedcorev1a1.CollectedFieldsWithCluster, +) fedcorev1a1.GenericCollectedStatusObject { + var colletcedStatusObj fedcorev1a1.GenericCollectedStatusObject + + if fedObj.GetNamespace() == "" { + colletcedStatusObj = &fedcorev1a1.ClusterCollectedStatus{} + } else { + colletcedStatusObj = &fedcorev1a1.CollectedStatus{} + } + + colletcedStatusObj.SetName(fedObj.GetName()) + colletcedStatusObj.SetNamespace(fedObj.GetNamespace()) + colletcedStatusObj.SetLabels(fedObj.GetLabels()) + + fedGVK := fedcorev1a1.SchemeGroupVersion.WithKind(reflect.TypeOf(fedObj).Elem().Name()) + colletcedStatusObj.SetOwnerReferences( + []metav1.OwnerReference{*metav1.NewControllerRef(fedObj, fedGVK)}, + ) + + colletcedStatusObj.GetGenericCollectedStatus().Clusters = clusterStatus + return colletcedStatusObj +} diff --git a/pkg/controllers/util/federatedstatus.go b/pkg/controllers/util/federatedstatus.go index 824cd70c..87d1a911 100644 --- a/pkg/controllers/util/federatedstatus.go +++ b/pkg/controllers/util/federatedstatus.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/util/collectedstatusadapters/adapter.go b/pkg/util/collectedstatusadapters/adapter.go new file mode 100644 index 00000000..aa23408f --- /dev/null +++ b/pkg/util/collectedstatusadapters/adapter.go @@ -0,0 +1,98 @@ +package collectedstatusadapters + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" + fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" +) + +func ensureNilInterface( + obj fedcorev1a1.GenericCollectedStatusObject, err error, +) (fedcorev1a1.GenericCollectedStatusObject, error) { + if err != nil { + // Returning a non-nil interface value with nil concrete type can be confusing. + // We make sure the returned interface value is nil if there's an error. 
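+		// (A nil *CollectedStatus stored in an interface makes `obj == nil`
+		// evaluate to false in callers, which could otherwise mask the error.)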
+ return nil, err + } + return obj, nil +} + +func GetFromLister( + collectedStatusLister fedcorev1a1listers.CollectedStatusLister, + clusterCollectedStatusLister fedcorev1a1listers.ClusterCollectedStatusLister, + namespace, name string, +) (fedcorev1a1.GenericCollectedStatusObject, error) { + if namespace == "" { + return ensureNilInterface(clusterCollectedStatusLister.Get(name)) + } else { + return ensureNilInterface(collectedStatusLister.CollectedStatuses(namespace).Get(name)) + } +} + +func Create( + ctx context.Context, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface, + obj fedcorev1a1.GenericCollectedStatusObject, + opts metav1.CreateOptions, +) (fedcorev1a1.GenericCollectedStatusObject, error) { + if obj.GetNamespace() == "" { + clusterCollectedStatus, ok := obj.(*fedcorev1a1.ClusterCollectedStatus) + if !ok { + return nil, fmt.Errorf("expected ClusterCollectedStatus but got %T", obj) + } + return ensureNilInterface( + fedv1a1Client.ClusterCollectedStatuses().Create(ctx, clusterCollectedStatus, opts), + ) + } else { + collectedStatus, ok := obj.(*fedcorev1a1.CollectedStatus) + if !ok { + return nil, fmt.Errorf("expected CollectedStatus but got %T", obj) + } + return ensureNilInterface( + fedv1a1Client.CollectedStatuses(obj.GetNamespace()).Create(ctx, collectedStatus, opts), + ) + } +} + +func Update( + ctx context.Context, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface, + obj fedcorev1a1.GenericCollectedStatusObject, + opts metav1.UpdateOptions, +) (fedcorev1a1.GenericCollectedStatusObject, error) { + if obj.GetNamespace() == "" { + clusterFedObject, ok := obj.(*fedcorev1a1.ClusterCollectedStatus) + if !ok { + return nil, fmt.Errorf("expected ClusterCollectedStatus but got %T", obj) + } + return ensureNilInterface( + fedv1a1Client.ClusterCollectedStatuses().Update(ctx, clusterFedObject, opts), + ) + } else { + fedObject, ok := obj.(*fedcorev1a1.CollectedStatus) + if !ok { + return nil, fmt.Errorf("expected CollectedStatus but got %T", obj) + } + return ensureNilInterface( + fedv1a1Client.CollectedStatuses(obj.GetNamespace()).Update(ctx, fedObject, opts), + ) + } +} + +func Delete( + ctx context.Context, + fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface, + namespace, name string, + opts metav1.DeleteOptions, +) error { + if namespace == "" { + return fedv1a1Client.ClusterCollectedStatuses().Delete(ctx, name, opts) + } else { + return fedv1a1Client.CollectedStatuses(namespace).Delete(ctx, name, opts) + } +} diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index f5100add..7264b220 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -24,7 +24,10 @@ import ( "sync" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" @@ -35,6 +38,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" 
"github.com/kubewharf/kubeadmiral/pkg/util/logging" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" @@ -273,6 +277,22 @@ func (m *federatedInformerManager) GetClusterClient(cluster string) (client dyna return client, ok } +func (m *federatedInformerManager) GetReadyClusters() ([]*fedcorev1a1.FederatedCluster, error) { + var clusters []*fedcorev1a1.FederatedCluster + + allClusters, err := m.GetFederatedClusterLister().List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("failed to list clusters: %w", err) + } + for _, cluster := range allClusters { + if clusterutil.IsClusterReady(&cluster.Status) { + clusters = append(clusters, cluster) + } + } + + return clusters, nil +} + func (m *federatedInformerManager) GetFederatedClusterLister() fedcorev1a1listers.FederatedClusterLister { return m.clusterInformer.Lister() } @@ -383,3 +403,53 @@ func DefaultClusterConnectionHash(cluster *fedcorev1a1.FederatedCluster) ([]byte } return b.Bytes(), nil } + +// GetClusterObject is a helper function to get a cluster object. GetClusterObject first attempts to get the object from +// the federated informer manager with the given key. However, if the cache for the cluster is not synced, it will send a GET +// request to the cluster's apiserver to retrieve the object directly. +func GetClusterObject( + ctx context.Context, + ftcManager FederatedTypeConfigManager, + fedInformerManager FederatedInformerManager, + clusterName string, + qualifiedName common.QualifiedName, + gvk schema.GroupVersionKind, +) (*unstructured.Unstructured, bool, error) { + lister, hasSynced, exists := fedInformerManager.GetResourceLister(gvk, clusterName) + if exists && hasSynced() { + clusterObj, err := lister.Get(qualifiedName.String()) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, false, nil + } else { + return nil, false, err + } + } + return clusterObj.(*unstructured.Unstructured), true, nil + } + + client, exists := fedInformerManager.GetClusterClient(clusterName) + if !exists { + return nil, false, fmt.Errorf("cluster client does not exist for cluster %q", clusterName) + } + + ftc, exists := ftcManager.GetResourceFTC(gvk) + if !exists { + return nil, false, fmt.Errorf("FTC does not exist for GVK %q", gvk) + } + + clusterObj, err := client.Resource(ftc.GetSourceTypeGVR()).Namespace(qualifiedName.Namespace).Get( + ctx, qualifiedName.Name, metav1.GetOptions{ResourceVersion: "0"}, + ) + if apierrors.IsNotFound(err) || meta.IsNoMatchError(err) { + return nil, false, nil + } + if err != nil { + return nil, false, fmt.Errorf("failed to get object %q with client: %w", qualifiedName.String(), err) + } + if !managedlabel.HasManagedLabel(clusterObj) { + return nil, false, nil + } + + return clusterObj, true, nil +} diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index aad8ab74..a696f288 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -44,6 +44,17 @@ type EventHandlerGenerator struct { // When a FTC deletion is observed, latest will be nil. type FTCUpdateHandler func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) +type FederatedTypeConfigManager interface { + // Returns the known FTC mapping for the given GVK if it exists. + GetResourceFTC(gvk schema.GroupVersionKind) (ftc *fedcorev1a1.FederatedTypeConfig, exists bool) + + // Returns the FederatedTypeConfig lister used by the manager. 
+ GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister + + // Returns true if the manager's view of FederatedTypeConfigs is synced. + HasSynced() bool +} + // InformerManager provides an interface for controllers that need to dynamically register event handlers and access // objects based on FederatedTypeConfigs. InformerManager will listen to FTC events and maintain informers for the // source type of each FTC. @@ -113,6 +124,8 @@ type FederatedInformerManager interface { GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister // Returns the FederatedCluster lister used by the FederatedInformerManager. GetFederatedClusterLister() fedcorev1a1listers.FederatedClusterLister + // Returns the joined clusters in ready status listed from the FederatedInformerManager. + GetReadyClusters() ([]*fedcorev1a1.FederatedCluster, error) // Returns true if the FederatedInformerManager's view of FederatedTypeConfigs and FederatedClusters is synced. HasSynced() bool From 59cef5c7846898ca401cfb04d389edf9e512089d Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Thu, 27 Jul 2023 10:45:59 +0800 Subject: [PATCH 088/173] bugfix: nil pointer panic in status controller --- pkg/controllers/status/controller.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index 72d771b0..688b98f1 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -179,6 +179,9 @@ func NewStatusController( if err := s.fedInformerManager.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{ Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool { + if lastApplied == nil || latest == nil { + return true + } return lastApplied.IsStatusCollectionEnabled() != latest.IsStatusCollectionEnabled() }, Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { From e10bbe8f070aa0a72b4d3202221c5c3a95eda31b Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Mon, 24 Jul 2023 11:22:41 +0800 Subject: [PATCH 089/173] feat(federated-informer-manager): integrate pod informer --- cmd/controller-manager/app/util.go | 11 +- .../federatedinformermanager.go | 184 +++++++++++++---- .../federatedinformermanager_test.go | 191 +++++++++++++++--- pkg/util/informermanager/informermanager.go | 10 +- pkg/util/informermanager/interface.go | 20 +- pkg/util/informermanager/podinformer.go | 139 +++++++++++++ 6 files changed, 464 insertions(+), 91 deletions(-) create mode 100644 pkg/util/informermanager/podinformer.go diff --git a/cmd/controller-manager/app/util.go b/cmd/controller-manager/app/util.go index 41958dfc..a12d8f3e 100644 --- a/cmd/controller-manager/app/util.go +++ b/cmd/controller-manager/app/util.go @@ -27,6 +27,7 @@ import ( "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "github.com/kubewharf/kubeadmiral/cmd/controller-manager/app/options" @@ -115,19 +116,15 @@ func createControllerContext(opts *options.Options) (*controllercontext.Context, nil, ) federatedInformerManager := informermanager.NewFederatedInformerManager( - informermanager.ClusterClientGetter{ + informermanager.ClusterClientHelper{ ConnectionHash: informermanager.DefaultClusterConnectionHash, - ClientGetter: func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) { - restConfig, err := clusterutil.BuildClusterConfig( + RestConfigGetter: func(cluster 
*fedcorev1a1.FederatedCluster) (*rest.Config, error) { + return clusterutil.BuildClusterConfig( cluster, kubeClientset, restConfig, common.DefaultFedSystemNamespace, ) - if err != nil { - return nil, err - } - return dynamic.NewForConfig(restConfig) }, }, fedInformerFactory.Core().V1alpha1().FederatedTypeConfigs(), diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 7264b220..aa04def9 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -22,15 +22,22 @@ import ( "encoding/gob" "fmt" "sync" + "time" + "golang.org/x/sync/semaphore" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" @@ -50,40 +57,51 @@ type federatedInformerManager struct { started bool shutdown bool - clientGetter ClusterClientGetter + clientHelper ClusterClientHelper + kubeClientGetter func(*fedcorev1a1.FederatedCluster, *rest.Config) (kubernetes.Interface, error) + dynamicClientGetter func(*fedcorev1a1.FederatedCluster, *rest.Config) (dynamic.Interface, error) + ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer clusterInformer fedcorev1a1informers.FederatedClusterInformer eventHandlerGenerators []*EventHandlerGenerator clusterEventHandlers []*ClusterEventHandler - clients map[string]dynamic.Interface - connectionMap map[string][]byte - informerManagers map[string]InformerManager - informerManagersCancelFuncs map[string]context.CancelFunc + kubeClients map[string]kubernetes.Interface + dynamicClients map[string]dynamic.Interface + connectionMap map[string][]byte + clusterCancelFuncs map[string]context.CancelFunc + informerManagers map[string]InformerManager + informerFactories map[string]informers.SharedInformerFactory - queue workqueue.RateLimitingInterface + queue workqueue.RateLimitingInterface + podListerSemaphore *semaphore.Weighted + initialClusters sets.Set[string] } func NewFederatedInformerManager( - clientGetter ClusterClientGetter, + clientHelper ClusterClientHelper, ftcInformer fedcorev1a1informers.FederatedTypeConfigInformer, clusterInformer fedcorev1a1informers.FederatedClusterInformer, ) FederatedInformerManager { manager := &federatedInformerManager{ - lock: sync.RWMutex{}, - started: false, - shutdown: false, - clientGetter: clientGetter, - ftcInformer: ftcInformer, - clusterInformer: clusterInformer, - eventHandlerGenerators: []*EventHandlerGenerator{}, - clusterEventHandlers: []*ClusterEventHandler{}, - clients: map[string]dynamic.Interface{}, - connectionMap: map[string][]byte{}, - informerManagers: map[string]InformerManager{}, - informerManagersCancelFuncs: map[string]context.CancelFunc{}, - queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), + lock: sync.RWMutex{}, + started: false, + shutdown: false, + clientHelper: clientHelper, + ftcInformer: ftcInformer, + clusterInformer: clusterInformer, + eventHandlerGenerators: []*EventHandlerGenerator{}, + clusterEventHandlers: []*ClusterEventHandler{}, + kubeClients: 
map[string]kubernetes.Interface{}, + dynamicClients: map[string]dynamic.Interface{}, + connectionMap: map[string][]byte{}, + clusterCancelFuncs: map[string]context.CancelFunc{}, + informerManagers: map[string]InformerManager{}, + informerFactories: map[string]informers.SharedInformerFactory{}, + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), + podListerSemaphore: semaphore.NewWeighted(3), // TODO: make this configurable + initialClusters: sets.New[string](), } clusterInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ @@ -100,6 +118,13 @@ func NewFederatedInformerManager( ftcInformer.Informer() + manager.dynamicClientGetter = func(_ *fedcorev1a1.FederatedCluster, config *rest.Config) (dynamic.Interface, error) { + return dynamic.NewForConfig(config) + } + manager.kubeClientGetter = func(_ *fedcorev1a1.FederatedCluster, config *rest.Config) (kubernetes.Interface, error) { + return kubernetes.NewForConfig(config) + } + return manager } @@ -145,7 +170,7 @@ func (m *federatedInformerManager) worker(ctx context.Context) { return } - err, needReenqueue := m.processCluster(ctx, cluster) + err, needReenqueue, delay := m.processCluster(ctx, cluster) if err != nil { if needReenqueue { logger.Error(err, "Failed to process FederatedCluster, will retry") @@ -159,22 +184,22 @@ func (m *federatedInformerManager) worker(ctx context.Context) { m.queue.Forget(key) if needReenqueue { - m.queue.Add(key) + m.queue.AddAfter(key, delay) } } func (m *federatedInformerManager) processCluster( ctx context.Context, cluster *fedcorev1a1.FederatedCluster, -) (err error, needReenqueue bool) { +) (err error, needReenqueue bool, delay time.Duration) { m.lock.Lock() defer m.lock.Unlock() clusterName := cluster.Name - connectionHash, err := m.clientGetter.ConnectionHash(cluster) + connectionHash, err := m.clientHelper.ConnectionHash(cluster) if err != nil { - return fmt.Errorf("failed to get connection hash for cluster %s: %w", clusterName, err), true + return fmt.Errorf("failed to get connection hash for cluster %s: %w", clusterName, err), true, 0 } if oldConnectionHash, exists := m.connectionMap[clusterName]; exists { if !bytes.Equal(oldConnectionHash, connectionHash) { @@ -183,16 +208,26 @@ func (m *federatedInformerManager) processCluster( // reenqueue. // Note: updating of cluster connection details, however, is still not a supported use case. 
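
// Aside: DefaultClusterConnectionHash (referenced above as the default
// ClusterClientHelper.ConnectionHash) is one way to detect changed connection
// details. A minimal sketch of the idea, assuming only that the
// connection-relevant fields are gob-encodable; the field names below are
// illustrative, not the actual FederatedCluster API:
//
//	func connectionHash(apiEndpoint, secretName string, insecure bool) ([]byte, error) {
//		var b bytes.Buffer
//		enc := gob.NewEncoder(&b)
//		// Encode every field that affects how a client connects; a changed
//		// hash makes processCluster tear down and rebuild the cluster's
//		// clients and informers on the next reconcile.
//		for _, v := range []interface{}{apiEndpoint, secretName, insecure} {
//			if err := enc.Encode(v); err != nil {
//				return nil, err
//			}
//		}
//		return b.Bytes(), nil
//	}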
err := m.processClusterDeletionUnlocked(ctx, clusterName)
-		return err, true
+		return err, true, 0
 		}
 	} else {
-		clusterClient, err := m.clientGetter.ClientGetter(cluster)
+		clusterRestConfig, err := m.clientHelper.RestConfigGetter(cluster)
+		if err != nil {
+			return fmt.Errorf("failed to get rest config for cluster %s: %w", clusterName, err), true, 0
+		}
+
+		clusterDynamicClient, err := m.dynamicClientGetter(cluster, clusterRestConfig)
+		if err != nil {
+			return fmt.Errorf("failed to get dynamic client for cluster %s: %w", clusterName, err), true, 0
+		}
+
+		clusterKubeClient, err := m.kubeClientGetter(cluster, clusterRestConfig)
 		if err != nil {
-			return fmt.Errorf("failed to get client for cluster %s: %w", clusterName, err), true
+			return fmt.Errorf("failed to get kubernetes client for cluster %s: %w", clusterName, err), true, 0
 		}
 
 		manager := NewInformerManager(
-			clusterClient,
+			clusterDynamicClient,
 			m.ftcInformer,
 			func(opts *metav1.ListOptions) {
 				selector := &metav1.LabelSelector{}
@@ -209,21 +244,39 @@ func (m *federatedInformerManager) processCluster(
 		for _, generator := range m.eventHandlerGenerators {
 			if err := manager.AddEventHandlerGenerator(generator); err != nil {
 				cancel()
-				return fmt.Errorf("failed to initialized InformerManager for cluster %s: %w", clusterName, err), true
+				return fmt.Errorf("failed to initialize InformerManager for cluster %s: %w", clusterName, err), true, 0
 			}
 		}
 
-		klog.FromContext(ctx).V(2).Info("Starting new InformerManager for FederatedCluster")
+		factory := informers.NewSharedInformerFactory(clusterKubeClient, 0)
+		addPodInformer(ctx, factory, clusterKubeClient, m.podListerSemaphore, true)
+		factory.Core().V1().Nodes().Informer()
 
+		klog.FromContext(ctx).V(2).Info("Starting new InformerManager for FederatedCluster")
 		manager.Start(ctx)
 
+		klog.FromContext(ctx).V(2).Info("Starting new SharedInformerFactory for FederatedCluster")
+		factory.Start(ctx.Done())
+
 		m.connectionMap[clusterName] = connectionHash
-		m.clients[clusterName] = clusterClient
+		m.kubeClients[clusterName] = clusterKubeClient
+		m.dynamicClients[clusterName] = clusterDynamicClient
+		m.clusterCancelFuncs[clusterName] = cancel
 		m.informerManagers[clusterName] = manager
-		m.informerManagersCancelFuncs[clusterName] = cancel
+		m.informerFactories[clusterName] = factory
+	}
+
+	if m.initialClusters.Has(cluster.Name) {
+		manager := m.informerManagers[cluster.Name]
+		if manager != nil && manager.HasSynced() {
+			m.initialClusters.Delete(cluster.Name)
+		} else {
+			klog.FromContext(ctx).V(3).Info("Waiting for InformerManager sync")
+			return nil, true, 100 * time.Millisecond
+		}
 	}
 
-	return nil, false
+	return nil, false, 0
 }
 
 func (m *federatedInformerManager) processClusterDeletion(ctx context.Context, clusterName string) error {
@@ -234,14 +287,16 @@ func (m *federatedInformerManager) processClusterDeletion(ctx context.Context, c
 func (m *federatedInformerManager) processClusterDeletionUnlocked(ctx context.Context, clusterName string) error {
 	delete(m.connectionMap, clusterName)
-	delete(m.clients, clusterName)
+	delete(m.dynamicClients, clusterName)
 
-	if cancel, ok := m.informerManagersCancelFuncs[clusterName]; ok {
+	if cancel, ok := m.clusterCancelFuncs[clusterName]; ok {
 		klog.FromContext(ctx).V(2).Info("Stopping InformerManager for FederatedCluster")
 		cancel()
 	}
 	delete(m.informerManagers, clusterName)
-	delete(m.informerManagersCancelFuncs, clusterName)
+	delete(m.clusterCancelFuncs, clusterName)
+
+	m.initialClusters.Delete(clusterName)
 
 	return nil
}
@@ -270,10 +325,17 @@ func (m
*federatedInformerManager) AddEventHandlerGenerator(generator *EventHand return nil } -func (m *federatedInformerManager) GetClusterClient(cluster string) (client dynamic.Interface, exists bool) { +func (m *federatedInformerManager) GetClusterDynamicClient(cluster string) (client dynamic.Interface, exists bool) { m.lock.RLock() defer m.lock.RUnlock() - client, ok := m.clients[cluster] + client, ok := m.dynamicClients[cluster] + return client, ok +} + +func (m *federatedInformerManager) GetClusterKubeClient(cluster string) (client kubernetes.Interface, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + client, ok := m.kubeClients[cluster] return client, ok } @@ -301,6 +363,34 @@ func (m *federatedInformerManager) GetFederatedTypeConfigLister() fedcorev1a1lis return m.ftcInformer.Lister() } +func (m *federatedInformerManager) GetNodeLister( + cluster string, +) (lister v1.NodeLister, informerSynced cache.InformerSynced, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + factory, ok := m.informerFactories[cluster] + if !ok { + return nil, nil, false + } + + return factory.Core().V1().Nodes().Lister(), factory.Core().V1().Nodes().Informer().HasSynced, true +} + +func (m *federatedInformerManager) GetPodLister( + cluster string, +) (lister v1.PodLister, informerSynced cache.InformerSynced, exists bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + factory, ok := m.informerFactories[cluster] + if !ok { + return nil, nil, false + } + + return factory.Core().V1().Pods().Lister(), factory.Core().V1().Pods().Informer().HasSynced, true +} + func (m *federatedInformerManager) GetResourceLister( gvk schema.GroupVersionKind, cluster string, @@ -317,7 +407,10 @@ func (m *federatedInformerManager) GetResourceLister( } func (m *federatedInformerManager) HasSynced() bool { - return m.ftcInformer.Informer().HasSynced() && m.clusterInformer.Informer().HasSynced() + m.lock.RLock() + defer m.lock.RUnlock() + return m.ftcInformer.Informer().HasSynced() && m.clusterInformer.Informer().HasSynced() && + len(m.initialClusters) == 0 } func (m *federatedInformerManager) Start(ctx context.Context) { @@ -333,11 +426,18 @@ func (m *federatedInformerManager) Start(ctx context.Context) { m.started = true - if !cache.WaitForCacheSync(ctx.Done(), m.HasSynced) { + if !cache.WaitForCacheSync(ctx.Done(), m.ftcInformer.Informer().HasSynced, m.clusterInformer.Informer().HasSynced) { logger.Error(nil, "Failed to wait for FederatedInformerManager cache sync") return } + // Populate the initial snapshot of clusters + + clusters := m.clusterInformer.Informer().GetStore().List() + for _, cluster := range clusters { + m.initialClusters.Insert(cluster.(*fedcorev1a1.FederatedCluster).GetName()) + } + for _, handler := range m.clusterEventHandlers { predicate := handler.Predicate callback := handler.Callback @@ -428,7 +528,7 @@ func GetClusterObject( return clusterObj.(*unstructured.Unstructured), true, nil } - client, exists := fedInformerManager.GetClusterClient(clusterName) + client, exists := fedInformerManager.GetClusterDynamicClient(clusterName) if !exists { return nil, false, fmt.Errorf("cluster client does not exist for cluster %q", clusterName) } diff --git a/pkg/util/informermanager/federatedinformermanager_test.go b/pkg/util/informermanager/federatedinformermanager_test.go index dc13a1a5..bbfd3acf 100644 --- a/pkg/util/informermanager/federatedinformermanager_test.go +++ b/pkg/util/informermanager/federatedinformermanager_test.go @@ -31,13 +31,16 @@ import ( "k8s.io/apimachinery/pkg/util/wait" dynamicclient 
"k8s.io/client-go/dynamic" dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" "k8s.io/klog/v2/ktesting" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" - "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/fake" + fedfake "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/fake" fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -78,11 +81,17 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that the clients for each cluster is eventually available for _, cluster := range defaultClusters { g.Eventually(func(g gomega.Gomega) { - client, exists := manager.GetClusterClient(cluster.Name) + client, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) g.Expect(client).ToNot(gomega.BeNil()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) @@ -90,7 +99,7 @@ func TestFederatedInformerManager(t *testing.T) { // 3. Verify that the client for a non-existent cluster is not available - client, exists := manager.GetClusterClient("cluster-4") + client, exists := manager.GetClusterDynamicClient("cluster-4") g.Expect(exists).To(gomega.BeFalse()) g.Expect(client).To(gomega.BeNil()) }) @@ -122,10 +131,16 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that client for cluster-1 does is not available initially. g.Consistently(func(g gomega.Gomega) { - client, exists := manager.GetClusterClient("cluster-1") + client, exists := manager.GetClusterDynamicClient("cluster-1") g.Expect(exists).To(gomega.BeFalse()) g.Expect(client).To(gomega.BeNil()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) @@ -142,7 +157,7 @@ func TestFederatedInformerManager(t *testing.T) { // 4. Verify that client for new cluster is eventually available g.Eventually(func(g gomega.Gomega) { - client, exists := manager.GetClusterClient(cluster.Name) + client, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) g.Expect(client).ToNot(gomega.BeNil()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) @@ -179,6 +194,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. 
Verify that listers for existing FTCs and clusters are eventually available for _, ftc := range defaultFTCs { @@ -195,6 +216,24 @@ func TestFederatedInformerManager(t *testing.T) { } } + for _, cluster := range defaultClusters { + g.Eventually(func(g gomega.Gomega) { + lister, informerSynced, exists := manager.GetPodLister(cluster.Name) + + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(lister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + + g.Eventually(func(g gomega.Gomega) { + lister, informerSynced, exists := manager.GetNodeLister(cluster.Name) + + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(lister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) + }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) + } + // 3. Verify that the lister for non-existent FTCs or clusters are not available lister, informerSynced, exists := manager.GetResourceLister(daemonsetGVK, "cluster-1") @@ -244,6 +283,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + ftc := daemonsetFTC gvk := ftc.GetSourceTypeGVK() @@ -302,6 +347,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + cluster := getTestCluster("cluster-1") // 2. Verify that listers for cluster-1 is not available at the start @@ -315,6 +366,16 @@ func TestFederatedInformerManager(t *testing.T) { g.Expect(lister).To(gomega.BeNil()) g.Expect(informerSynced).To(gomega.BeNil()) } + + podLister, informerSynced, exists := manager.GetPodLister(cluster.Name) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(podLister).To(gomega.BeNil()) + g.Expect(informerSynced).To(gomega.BeNil()) + + nodeLister, informerSynced, exists := manager.GetNodeLister(cluster.Name) + g.Expect(exists).To(gomega.BeFalse()) + g.Expect(nodeLister).To(gomega.BeNil()) + g.Expect(informerSynced).To(gomega.BeNil()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) // 3. 
Create cluster-1 @@ -333,6 +394,16 @@ func TestFederatedInformerManager(t *testing.T) { g.Expect(lister).ToNot(gomega.BeNil()) g.Expect(informerSynced()).To(gomega.BeTrue()) } + + podLister, informerSynced, exists := manager.GetPodLister(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(podLister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) + + nodeLister, informerSynced, exists := manager.GetNodeLister(cluster.Name) + g.Expect(exists).To(gomega.BeTrue()) + g.Expect(nodeLister).ToNot(gomega.BeNil()) + g.Expect(informerSynced()).To(gomega.BeTrue()) }).WithTimeout(time.Second * 2).Should(gomega.Succeed()) }) @@ -387,6 +458,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify alwaysRegistered is eventually registered for all existing FTCs and clusters. for _, cluster := range defaultClusters { @@ -406,7 +483,7 @@ func TestFederatedInformerManager(t *testing.T) { dp1.SetAnnotations(map[string]string{"test": "test"}) for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -439,7 +516,7 @@ func TestFederatedInformerManager(t *testing.T) { // 5. Verify that events for non-existent FTCs are not received for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -504,12 +581,17 @@ func TestFederatedInformerManager(t *testing.T) { generators, clusterHandlers, ) - defer func() { cancel() _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that alwaysRegistered is not registered initially for daemonset alwaysRegistered.AssertConsistently(g, time.Second*2) @@ -531,7 +613,7 @@ func TestFederatedInformerManager(t *testing.T) { // 5. Verify that newly generated events are also received by alwaysRegistered for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -548,7 +630,7 @@ func TestFederatedInformerManager(t *testing.T) { // 6. 
Verify that events for non-existent FTCs are not received by alwaysRegistered for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -614,6 +696,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that alwaysRegistered is not registered initially since there are no clusters alwaysRegistered.AssertConsistently(g, time.Second*2) @@ -653,7 +741,7 @@ func TestFederatedInformerManager(t *testing.T) { // 5. Verify that newly generated events are also received by alwaysRegistered for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -670,7 +758,7 @@ func TestFederatedInformerManager(t *testing.T) { // 6. Verify that events for non-existent FTCs are not received by alwaysRegistered for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -812,6 +900,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that handler is not registered initially. handler.AssertConsistently(g, time.Second*2) @@ -830,7 +924,7 @@ func TestFederatedInformerManager(t *testing.T) { handler.AssertEventually(g, time.Second*2) for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -888,6 +982,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that handler is registered initially. handler.ExpectGenerateEvents(ftc.Name, len(defaultClusters)) @@ -905,7 +1005,7 @@ func TestFederatedInformerManager(t *testing.T) { // 4. Verify that handler is unregistered and new events are no longer received by handler. 
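
// Aside: the unregistration exercised by this test works through the
// EventHandlerGenerator contract seen in processFTC: when
// Predicate(lastApplied, latest) returns true, the old registration is removed
// and Generator(latest) is consulted again; a nil handler means the generator
// is (or stays) unregistered. A sketch of a generator gated on an FTC
// annotation (the annotation key is illustrative):
//
//	generator := &EventHandlerGenerator{
//		Predicate: func(lastApplied, latest *fedcorev1a1.FederatedTypeConfig) bool {
//			return lastApplied == nil ||
//				lastApplied.GetAnnotations()["example/enabled"] != latest.GetAnnotations()["example/enabled"]
//		},
//		Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler {
//			if ftc.GetAnnotations()["example/enabled"] != "true" {
//				return nil // handler is removed, or never registered
//			}
//			return cache.ResourceEventHandlerFuncs{
//				AddFunc: func(obj interface{}) { /* enqueue obj */ },
//			}
//		},
//	}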
for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -963,6 +1063,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that handler is registered initially handler.ExpectGenerateEvents(ftc.Name, len(defaultClusters)) @@ -982,7 +1088,7 @@ func TestFederatedInformerManager(t *testing.T) { dp1.SetAnnotations(map[string]string{"test": "test"}) for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -1041,6 +1147,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that handler is registered initially handler.ExpectGenerateEvents(ftc.Name, len(defaultClusters)) @@ -1060,7 +1172,7 @@ func TestFederatedInformerManager(t *testing.T) { dp1.SetAnnotations(map[string]string{"test": "test"}) for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -1125,6 +1237,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that handler1 and handler2 is registered initially for all FTCs and clusters for _, cluster := range defaultClusters { @@ -1157,7 +1275,7 @@ func TestFederatedInformerManager(t *testing.T) { // 4. Verify that handler1 and handler2 is unregistered for deployments and no additional events are received for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -1174,7 +1292,7 @@ func TestFederatedInformerManager(t *testing.T) { // 5. Verify that handler1 and handler2 is not unregistered for other FTCs. 
for _, cluster := range defaultClusters { - dynamicClient, exists := manager.GetClusterClient(cluster.Name) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster.Name) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -1250,6 +1368,12 @@ func TestFederatedInformerManager(t *testing.T) { _ = wait.PollInfinite(time.Millisecond, func() (done bool, err error) { return manager.IsShutdown(), nil }) }() + ctxWithTimeout, timeoutCancel := context.WithTimeout(ctx, time.Second) + defer timeoutCancel() + if !cache.WaitForCacheSync(ctxWithTimeout.Done(), manager.HasSynced) { + g.Fail("Timed out waiting for FederatedInformerManager cache sync") + } + // 2. Verify that handler1 and handler2 is registered initially for all FTCs and clusters for _, cluster := range defaultClusters { @@ -1271,7 +1395,7 @@ func TestFederatedInformerManager(t *testing.T) { // 3. Delete cluster-1 // Get client before deletion - dynamicClient, exists := manager.GetClusterClient("cluster-1") + dynamicClient, exists := manager.GetClusterDynamicClient("cluster-1") err := fedClient.CoreV1alpha1().FederatedClusters().Delete(ctx, "cluster-1", metav1.DeleteOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -1295,7 +1419,7 @@ func TestFederatedInformerManager(t *testing.T) { // 5. Verify that handler1 and handler2 is not unregistered for other clusters. for _, cluster := range []string{"cluster-2", "cluster-3"} { - dynamicClient, exists := manager.GetClusterClient(cluster) + dynamicClient, exists := manager.GetClusterDynamicClient(cluster) g.Expect(exists).To(gomega.BeTrue()) generateEvents( @@ -1437,7 +1561,7 @@ func bootstrapFederatedInformerManagerWithFakeClients( for _, ftc := range ftcs { fedObjects = append(fedObjects, runtime.Object(ftc.DeepCopy())) } - fedClient := fake.NewSimpleClientset(fedObjects...) + fedClient := fedfake.NewSimpleClientset(fedObjects...) 
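
// Aside: because ClusterClientHelper now surfaces only a *rest.Config, the
// bootstrap below can return a nil config and instead stub the manager's
// unexported dynamicClientGetter/kubeClientGetter hooks with fakes -- the
// config is only ever consumed by those hooks. A condensed sketch of the
// pattern (assuming access to the concrete *federatedInformerManager type):
//
//	mgr := NewFederatedInformerManager(helper, ftcInformer, clusterInformer).(*federatedInformerManager)
//	mgr.dynamicClientGetter = func(*fedcorev1a1.FederatedCluster, *rest.Config) (dynamic.Interface, error) {
//		return dynamicfake.NewSimpleDynamicClient(scheme), nil // canned objects go here
//	}
//	mgr.kubeClientGetter = func(*fedcorev1a1.FederatedCluster, *rest.Config) (kubernetes.Interface, error) {
//		return fake.NewSimpleClientset(), nil
//	}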
factory := fedinformers.NewSharedInformerFactory(fedClient, 0) @@ -1450,15 +1574,27 @@ func bootstrapFederatedInformerManagerWithFakeClients( } informerManager := NewFederatedInformerManager( - ClusterClientGetter{ + ClusterClientHelper{ ConnectionHash: DefaultClusterConnectionHash, - ClientGetter: func(cluster *fedcorev1a1.FederatedCluster) (dynamicclient.Interface, error) { - return dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects[cluster.Name]...), nil + RestConfigGetter: func(cluster *fedcorev1a1.FederatedCluster) (*rest.Config, error) { + return nil, nil }, }, factory.Core().V1alpha1().FederatedTypeConfigs(), factory.Core().V1alpha1().FederatedClusters(), ) + informerManager.(*federatedInformerManager).dynamicClientGetter = func( + cluster *fedcorev1a1.FederatedCluster, + config *rest.Config, + ) (dynamicclient.Interface, error) { + return dynamicfake.NewSimpleDynamicClient(scheme, dynamicObjects[cluster.Name]...), nil + } + informerManager.(*federatedInformerManager).kubeClientGetter = func( + cluster *fedcorev1a1.FederatedCluster, + config *rest.Config, + ) (kubernetes.Interface, error) { + return fake.NewSimpleClientset(), nil + } for _, generator := range eventHandlerGenerators { err := informerManager.AddEventHandlerGenerator(generator) @@ -1472,12 +1608,5 @@ func bootstrapFederatedInformerManagerWithFakeClients( factory.Start(ctx.Done()) informerManager.Start(ctx) - ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - - if !cache.WaitForCacheSync(ctxWithTimeout.Done(), informerManager.HasSynced) { - g.Fail("Timed out waiting for FederatedInformerManager cache sync") - } - return informerManager, fedClient } diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go index 42e3c7b2..8124a5e6 100644 --- a/pkg/util/informermanager/informermanager.go +++ b/pkg/util/informermanager/informermanager.go @@ -54,8 +54,7 @@ type informerManager struct { eventHandlerGenerators []*EventHandlerGenerator ftcUpdateHandlers []FTCUpdateHandler - initialFTCs sets.Set[string] - gvkMapping *bijection.Bijection[string, schema.GroupVersionKind] + gvkMapping *bijection.Bijection[string, schema.GroupVersionKind] lastObservedFTCs map[string]*fedcorev1a1.FederatedTypeConfig informers map[string]informers.GenericInformer @@ -63,7 +62,8 @@ type informerManager struct { eventHandlerRegistrations map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration lastAppliedFTCsCache map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig - queue workqueue.RateLimitingInterface + queue workqueue.RateLimitingInterface + initialFTCs sets.Set[string] } func NewInformerManager( @@ -80,7 +80,6 @@ func NewInformerManager( ftcInformer: ftcInformer, eventHandlerGenerators: []*EventHandlerGenerator{}, ftcUpdateHandlers: []FTCUpdateHandler{}, - initialFTCs: sets.New[string](), gvkMapping: bijection.NewBijection[string, schema.GroupVersionKind](), lastObservedFTCs: map[string]*fedcorev1a1.FederatedTypeConfig{}, informers: map[string]informers.GenericInformer{}, @@ -88,6 +87,7 @@ func NewInformerManager( eventHandlerRegistrations: map[string]map[*EventHandlerGenerator]cache.ResourceEventHandlerRegistration{}, lastAppliedFTCsCache: map[string]map[*EventHandlerGenerator]*fedcorev1a1.FederatedTypeConfig{}, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), + initialFTCs: sets.New[string](), } ftcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -383,7 +383,7 @@ func (m 
*informerManager) Start(ctx context.Context) { return } - // Populate the intial snapshot of FTCs + // Populate the initial snapshot of FTCs ftcs := m.ftcInformer.Informer().GetStore().List() for _, ftc := range ftcs { diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index a696f288..5ea8b2a6 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -21,6 +21,9 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" @@ -117,8 +120,13 @@ type FederatedInformerManager interface { gvk schema.GroupVersionKind, cluster string, ) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) - // Returns a client for the given cluster if it exists. The client for each cluster will eventually exist. - GetClusterClient(cluster string) (client dynamic.Interface, exists bool) + // Returns a dynamic client for the given cluster if it exists. The client for each cluster will eventually exist. + GetClusterDynamicClient(cluster string) (client dynamic.Interface, exists bool) + // Returns a kubernetes client for the given cluster if it exists. The client for each cluster will eventually exist. + GetClusterKubeClient(cluster string) (client kubernetes.Interface, exists bool) + + GetPodLister(cluster string) (lister corev1listers.PodLister, informerSynced cache.InformerSynced, exists bool) + GetNodeLister(cluster string) (lister corev1listers.NodeLister, informerSynced cache.InformerSynced, exists bool) // Returns the FederatedTypeConfig lister used by the FederatedInformerManager. GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister @@ -140,12 +148,12 @@ type FederatedInformerManager interface { IsShutdown() bool } -// ClusterClientGetter is used by the FederatedInformerManager to create clients for joined member clusters. -type ClusterClientGetter struct { +// ClusterClientHelper is used by the FederatedInformerManager to create clients for joined member clusters. +type ClusterClientHelper struct { // ConnectionHash should return a string that uniquely identifies the combination of parameters used to generate the // cluster client. A change in the connection hash indicates a need to create a new client for a given member // cluster. ConnectionHash func(cluster *fedcorev1a1.FederatedCluster) ([]byte, error) - // ClientGetter returns a dynamic client for the given member cluster. - ClientGetter func(cluster *fedcorev1a1.FederatedCluster) (dynamic.Interface, error) + // RestConfigGetter returns a *rest.Config for the given member cluster. + RestConfigGetter func(cluster *fedcorev1a1.FederatedCluster) (*rest.Config, error) } diff --git a/pkg/util/informermanager/podinformer.go b/pkg/util/informermanager/podinformer.go new file mode 100644 index 00000000..d5f6f539 --- /dev/null +++ b/pkg/util/informermanager/podinformer.go @@ -0,0 +1,139 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package informermanager + +import ( + "context" + "time" + + "golang.org/x/sync/semaphore" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/informers" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +func addPodInformer(ctx context.Context, + informer informers.SharedInformerFactory, + client kubeclient.Interface, + podListerSemaphore *semaphore.Weighted, + enablePodPruning bool, +) { + informer.InformerFor(&corev1.Pod{}, func(k kubeclient.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + podListerWatcher(ctx, client, podListerSemaphore, enablePodPruning), + &corev1.Pod{}, + resyncPeriod, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + ) + }) + +} + +func podListerWatcher( + ctx context.Context, + client kubeclient.Interface, + semaphore *semaphore.Weighted, + enablePodPruning bool, +) cache.ListerWatcher { + return &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if semaphore != nil { + if err := semaphore.Acquire(ctx, 1); err != nil { + return nil, err + } + defer semaphore.Release(1) + } + pods, err := client.CoreV1().Pods(corev1.NamespaceAll).List(ctx, options) + if err != nil { + return nil, err + } + if enablePodPruning { + for i := range pods.Items { + prunePod(&pods.Items[i]) + } + } + return pods, nil + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + watcher, err := client.CoreV1().Pods(corev1.NamespaceAll).Watch(ctx, options) + if err != nil { + return nil, err + } + if !enablePodPruning { + return watcher, nil + } + + // It's easy for a consumer to add buffering via an extra + // goroutine/channel, but impossible for them to remove it, + // so nonbuffered is better. -- from watch.NewStreamWatcher + proxyCh := make(chan watch.Event) + proxyWatcher := watch.NewProxyWatcher(proxyCh) + go func() { + defer watcher.Stop() + // Closing proxyCh will notify the reflector to stop the current + // watching cycle and then restart the list and watch. 
+ defer close(proxyCh) + for { + select { + case <-proxyWatcher.StopChan(): + return + case event, ok := <-watcher.ResultChan(): + if !ok { + // the watcher has been closed, stop the proxy + return + } + if pod, ok := event.Object.(*corev1.Pod); ok { + prunePod(pod) + } + proxyCh <- event + } + } + }() + return proxyWatcher, nil + }, + } +} + +func prunePod(pod *corev1.Pod) { + containers := make([]corev1.Container, len(pod.Spec.Containers)) + initContainers := make([]corev1.Container, len(pod.Spec.InitContainers)) + for i := range pod.Spec.Containers { + containers[i] = corev1.Container{Resources: pod.Spec.Containers[i].Resources} + } + for i := range pod.Spec.InitContainers { + initContainers[i] = corev1.Container{Resources: pod.Spec.InitContainers[i].Resources} + } + *pod = corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + Namespace: pod.Namespace, + Generation: pod.Generation, + ResourceVersion: pod.ResourceVersion, + UID: pod.UID, + }, + Spec: corev1.PodSpec{ + NodeName: pod.Spec.NodeName, + Overhead: pod.Spec.Overhead, + Containers: containers, + InitContainers: initContainers, + }, + } +} From 513a40d79bc558b60dc29a8a7b6976a5197f7f4f Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 25 Jul 2023 13:09:40 +0800 Subject: [PATCH 090/173] fix(federated-informer-manager): fix cluster deletion --- .../federatedinformermanager.go | 24 ++++++++++--------- pkg/util/informermanager/informermanager.go | 16 ++++++------- pkg/util/informermanager/podinformer.go | 20 +++++++++------- 3 files changed, 32 insertions(+), 28 deletions(-) diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index aa04def9..0f433d23 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -170,7 +170,7 @@ func (m *federatedInformerManager) worker(ctx context.Context) { return } - err, needReenqueue, delay := m.processCluster(ctx, cluster) + needReenqueue, delay, err := m.processCluster(ctx, cluster) if err != nil { if needReenqueue { logger.Error(err, "Failed to process FederatedCluster, will retry") @@ -191,7 +191,7 @@ func (m *federatedInformerManager) worker(ctx context.Context) { func (m *federatedInformerManager) processCluster( ctx context.Context, cluster *fedcorev1a1.FederatedCluster, -) (err error, needReenqueue bool, delay time.Duration) { +) (needReenqueue bool, reenqueueDelay time.Duration, err error) { m.lock.Lock() defer m.lock.Unlock() @@ -199,7 +199,7 @@ func (m *federatedInformerManager) processCluster( connectionHash, err := m.clientHelper.ConnectionHash(cluster) if err != nil { - return fmt.Errorf("failed to get connection hash for cluster %s: %w", clusterName, err), true, 0 + return true, 0, fmt.Errorf("failed to get connection hash for cluster %s: %w", clusterName, err) } if oldConnectionHash, exists := m.connectionMap[clusterName]; exists { if !bytes.Equal(oldConnectionHash, connectionHash) { @@ -208,22 +208,22 @@ func (m *federatedInformerManager) processCluster( // reenqueue. // Note: updating of cluster connection details, however, is still not a supported use case. 
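
// Aside: this patch also flips processCluster/processFTC from error-first to
// the conventional error-last return order, which lets the worker loop read
// naturally. A sketch of the consuming side, condensed from the worker above:
//
//	needReenqueue, delay, err := m.processCluster(ctx, cluster)
//	switch {
//	case err != nil && needReenqueue:
//		m.queue.AddRateLimited(key) // retry with backoff
//	case err != nil:
//		m.queue.Forget(key) // give up on this item
//	default:
//		m.queue.Forget(key) // reset backoff
//		if needReenqueue {
//			m.queue.AddAfter(key, delay) // e.g. poll again while caches sync
//		}
//	}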
err := m.processClusterDeletionUnlocked(ctx, clusterName)
-			return err, true, 0
+			return true, 0, err
 		}
 	} else {
 		clusterRestConfig, err := m.clientHelper.RestConfigGetter(cluster)
 		if err != nil {
-			return fmt.Errorf("failed to get rest config for cluster %s: %w", clusterName, err), true, 0
+			return true, 0, fmt.Errorf("failed to get rest config for cluster %s: %w", clusterName, err)
 		}
 
 		clusterDynamicClient, err := m.dynamicClientGetter(cluster, clusterRestConfig)
 		if err != nil {
-			return fmt.Errorf("failed to get dynamic client for cluster %s: %w", clusterName, err), true, 0
+			return true, 0, fmt.Errorf("failed to get dynamic client for cluster %s: %w", clusterName, err)
 		}
 
 		clusterKubeClient, err := m.kubeClientGetter(cluster, clusterRestConfig)
 		if err != nil {
-			return fmt.Errorf("failed to get kubernetes client for cluster %s: %w", clusterName, err), true, 0
+			return true, 0, fmt.Errorf("failed to get kubernetes client for cluster %s: %w", clusterName, err)
 		}
 
 		manager := NewInformerManager(
@@ -244,7 +244,7 @@ func (m *federatedInformerManager) processCluster(
 		for _, generator := range m.eventHandlerGenerators {
 			if err := manager.AddEventHandlerGenerator(generator); err != nil {
 				cancel()
-				return fmt.Errorf("failed to initialize InformerManager for cluster %s: %w", clusterName, err), true, 0
+				return true, 0, fmt.Errorf("failed to initialize InformerManager for cluster %s: %w", clusterName, err)
 			}
 		}
 
@@ -272,11 +272,11 @@ func (m *federatedInformerManager) processCluster(
 			m.initialClusters.Delete(cluster.Name)
 		} else {
 			klog.FromContext(ctx).V(3).Info("Waiting for InformerManager sync")
-			return nil, true, 100 * time.Millisecond
+			return true, 100 * time.Millisecond, nil
 		}
 	}
 
-	return nil, false, 0
+	return false, 0, nil
 }
 
 func (m *federatedInformerManager) processClusterDeletion(ctx context.Context, clusterName string) error {
@@ -287,13 +287,15 @@ func (m *federatedInformerManager) processClusterDeletion(ctx context.Context, c
 func (m *federatedInformerManager) processClusterDeletionUnlocked(ctx context.Context, clusterName string) error {
 	delete(m.connectionMap, clusterName)
+	delete(m.kubeClients, clusterName)
 	delete(m.dynamicClients, clusterName)
 
 	if cancel, ok := m.clusterCancelFuncs[clusterName]; ok {
-		klog.FromContext(ctx).V(2).Info("Stopping InformerManager for FederatedCluster")
+		klog.FromContext(ctx).V(2).Info("Stopping InformerManager and SharedInformerFactory for FederatedCluster")
 		cancel()
 	}
 	delete(m.informerManagers, clusterName)
+	delete(m.informerFactories, clusterName)
 	delete(m.clusterCancelFuncs, clusterName)
 
 	m.initialClusters.Delete(clusterName)
diff --git a/pkg/util/informermanager/informermanager.go b/pkg/util/informermanager/informermanager.go
index 8124a5e6..c23d2687 100644
--- a/pkg/util/informermanager/informermanager.go
+++ b/pkg/util/informermanager/informermanager.go
@@ -141,7 +141,7 @@ func (m *informerManager) worker(ctx context.Context) {
 		return
 	}
 
-	err, needReenqueue, delay := m.processFTC(ctx, ftc)
+	needReenqueue, delay, err := m.processFTC(ctx, ftc)
 	if err != nil {
 		if needReenqueue {
 			logger.Error(err, "Failed to process FederatedTypeConfig, will retry")
@@ -162,7 +162,7 @@ func (m *informerManager) worker(ctx context.Context) {
 func (m *informerManager) processFTC(
 	ctx context.Context, ftc *fedcorev1a1.FederatedTypeConfig,
-) (err error, needReenqueue bool, reenqueueDelay time.Duration) {
+) (needReenqueue bool, reenqueueDelay time.Duration, err error) {
 	m.lock.Lock()
 	defer m.lock.Unlock()
@@ -183,14 +183,14 @@ func (m *informerManager) processFTC(
 	// 
time and we missed processing the deletion. We simply process the ftc deletion and reenqueue. Note: // updating of ftc source types, however, is still not a supported use case. err := m.processFTCDeletionUnlocked(ctx, ftcName) - return err, true, 0 + return true, 0, err } informer = m.informers[ftcName] } else { if err := m.gvkMapping.Add(ftcName, gvk); err != nil { // There must be another ftc with the same source type GVK. - return fmt.Errorf("source type is already referenced by another FederatedTypeConfig: %w", err), false, 0 + return false, 0, fmt.Errorf("source type is already referenced by another FederatedTypeConfig: %w", err) } logger.V(2).Info("Starting new informer for FederatedTypeConfig") @@ -220,7 +220,7 @@ func (m *informerManager) processFTC( if !informer.Informer().HasSynced() { logger.V(3).Info("Informer for FederatedTypeConfig not synced, will not register event handlers yet") - return nil, true, 100 * time.Millisecond + return true, 100 * time.Millisecond, nil } registrations := m.eventHandlerRegistrations[ftcName] @@ -237,7 +237,7 @@ func (m *informerManager) processFTC( if oldRegistration := registrations[generator]; oldRegistration != nil { if err := informer.Informer().RemoveEventHandler(oldRegistration); err != nil { - return fmt.Errorf("failed to unregister event handler: %w", err), true, 0 + return true, 0, fmt.Errorf("failed to unregister event handler: %w", err) } delete(registrations, generator) } @@ -246,7 +246,7 @@ func (m *informerManager) processFTC( if handler := generator.Generator(ftc); handler != nil { newRegistration, err := informer.Informer().AddEventHandler(handler) if err != nil { - return fmt.Errorf("failed to register event handler: %w", err), true, 0 + return true, 0, fmt.Errorf("failed to register event handler: %w", err) } registrations[generator] = newRegistration } @@ -254,7 +254,7 @@ func (m *informerManager) processFTC( lastAppliedFTCs[generator] = ftc } - return nil, false, 0 + return false, 0, nil } func (m *informerManager) processFTCDeletion(ctx context.Context, ftcName string) error { diff --git a/pkg/util/informermanager/podinformer.go b/pkg/util/informermanager/podinformer.go index d5f6f539..c49da4b5 100644 --- a/pkg/util/informermanager/podinformer.go +++ b/pkg/util/informermanager/podinformer.go @@ -36,15 +36,17 @@ func addPodInformer(ctx context.Context, podListerSemaphore *semaphore.Weighted, enablePodPruning bool, ) { - informer.InformerFor(&corev1.Pod{}, func(k kubeclient.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - podListerWatcher(ctx, client, podListerSemaphore, enablePodPruning), - &corev1.Pod{}, - resyncPeriod, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - }) - + informer.InformerFor( + &corev1.Pod{}, + func(k kubeclient.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + podListerWatcher(ctx, client, podListerSemaphore, enablePodPruning), + &corev1.Pod{}, + resyncPeriod, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + ) + }, + ) } func podListerWatcher( From 18f2623d6a2569d36229ddff858a0e9117e342b6 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 25 Jul 2023 11:50:26 +0800 Subject: [PATCH 091/173] refactor(cluster-controller): adopt unified types --- pkg/controllers/common/types.go | 4 +- pkg/controllers/federate/controller.go | 106 ++++--- .../federatedcluster/clusterjoin.go | 117 ++++--- .../federatedcluster/clusterstatus.go | 72 ++--- 
.../federatedcluster/controller.go | 297 +++++++++--------- pkg/controllers/federatedcluster/util.go | 1 - pkg/controllers/nsautoprop/controller.go | 47 +-- pkg/controllers/policyrc/controller.go | 57 ++-- pkg/controllers/status/controller.go | 95 ++++-- pkg/util/eventhandlers/eventhandler.go | 56 +++- 10 files changed, 490 insertions(+), 362 deletions(-) diff --git a/pkg/controllers/common/types.go b/pkg/controllers/common/types.go index 946d7324..210ca55b 100644 --- a/pkg/controllers/common/types.go +++ b/pkg/controllers/common/types.go @@ -25,7 +25,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" - pkgruntime "k8s.io/apimachinery/pkg/runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // QualifiedName comprises a resource name with an optional namespace. @@ -39,7 +39,7 @@ type QualifiedName struct { Name string } -func NewQualifiedName(obj pkgruntime.Object) QualifiedName { +func NewQualifiedName(obj metav1.Object) QualifiedName { accessor, err := meta.Accessor(obj) if err != nil { // This should never happen, but if it does, the diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 7964f5bf..bf6ea4a0 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -25,7 +25,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/dynamic" @@ -130,14 +129,16 @@ func NewFederateController( uns := obj.(*unstructured.Unstructured) return uns.GetNamespace() != fedSystemNamespace }, - Handler: eventhandlers.NewTriggerOnAllChanges(func(obj runtime.Object) { - uns := obj.(*unstructured.Unstructured) - c.worker.Enqueue(workerKey{ - name: uns.GetName(), - namespace: uns.GetNamespace(), - gvk: ftc.GetSourceTypeGVK(), - }) - }), + Handler: eventhandlers.NewTriggerOnAllChanges( + func(uns *unstructured.Unstructured) workerKey { + return workerKey{ + name: uns.GetName(), + namespace: uns.GetNamespace(), + gvk: ftc.GetSourceTypeGVK(), + } + }, + c.worker.Enqueue, + ), } }, }); err != nil { @@ -152,47 +153,59 @@ func NewFederateController( fedObj := obj.(*fedcorev1a1.FederatedObject) return fedObj.Namespace != fedSystemNamespace }, - Handler: eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) { - fedObj := o.(*fedcorev1a1.FederatedObject) - logger := c.logger.WithValues("federated-object", common.NewQualifiedName(fedObj)) - - srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured() - if err != nil { - logger.Error(err, "Failed to get source object's metadata from FederatedObject") - return - } - - gvk := srcMeta.GroupVersionKind() - - c.worker.Enqueue(workerKey{ - name: srcMeta.GetName(), - namespace: srcMeta.GetNamespace(), - gvk: gvk, - }) - }), + Handler: eventhandlers.NewTriggerOnAllChanges( + func(fedObj *fedcorev1a1.FederatedObject) *fedcorev1a1.FederatedObject { + return fedObj + }, + func(fedObj *fedcorev1a1.FederatedObject) { + srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured() + if err != nil { + c.logger.Error( + err, + "Failed to get source object's metadata from FederatedObject", + "object", + common.NewQualifiedName(fedObj), + ) + return + } + + gvk := srcMeta.GroupVersionKind() + + c.worker.Enqueue(workerKey{ + name: srcMeta.GetName(), + namespace: srcMeta.GetNamespace(), + gvk: gvk, + }) + }), }); err != nil { return nil, err } if _, err := 
clusterFedObjectInformer.Informer().AddEventHandler(
-		eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) {
-			fedObj := o.(*fedcorev1a1.ClusterFederatedObject)
-			logger := c.logger.WithValues("cluster-federated-object", common.NewQualifiedName(fedObj))
-
-			srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured()
-			if err != nil {
-				logger.Error(err, "Failed to get source object's metadata from ClusterFederatedObject")
-				return
-			}
-
-			gvk := srcMeta.GroupVersionKind()
-
-			c.worker.Enqueue(workerKey{
-				name:      srcMeta.GetName(),
-				namespace: srcMeta.GetNamespace(),
-				gvk:       gvk,
-			})
-		}),
+		eventhandlers.NewTriggerOnAllChanges(
+			func(fedObj *fedcorev1a1.ClusterFederatedObject) *fedcorev1a1.ClusterFederatedObject {
+				return fedObj
+			},
+			func(fedObj *fedcorev1a1.ClusterFederatedObject) {
+				srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured()
+				if err != nil {
+					c.logger.Error(
+						err,
+						"Failed to get source object's metadata from ClusterFederatedObject",
+						"object",
+						common.NewQualifiedName(fedObj),
+					)
+					return
+				}
+
+				gvk := srcMeta.GroupVersionKind()
+
+				c.worker.Enqueue(workerKey{
+					name:      srcMeta.GetName(),
+					namespace: srcMeta.GetNamespace(),
+					gvk:       gvk,
+				})
+			}),
	); err != nil {
		return nil, err
	}
@@ -224,8 +237,7 @@ func (c *FederateController) HasSynced() bool {
 func (c *FederateController) reconcile(ctx context.Context, key workerKey) (status worker.Result) {
 	_ = c.metrics.Rate("federate.throughput", 1)
-	ctx, logger := logging.InjectLogger(ctx, c.logger)
-	ctx, logger = logging.InjectLoggerValues(ctx, "source-object", key.QualifiedName().String(), "gvk", key.gvk)
+	ctx, logger := logging.InjectLoggerValues(ctx, "source-object", key.QualifiedName().String(), "gvk", key.gvk)
 
 	startTime := time.Now()
 
diff --git a/pkg/controllers/federatedcluster/clusterjoin.go b/pkg/controllers/federatedcluster/clusterjoin.go
index 7b54790e..906396be 100644
--- a/pkg/controllers/federatedcluster/clusterjoin.go
+++ b/pkg/controllers/federatedcluster/clusterjoin.go
@@ -1,4 +1,3 @@
-//go:build exclude
 /*
 Copyright 2018 The Kubernetes Authors.
@@ -33,15 +32,14 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/wait"
-	kubeclient "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/record"
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
-	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
+	"github.com/kubewharf/kubeadmiral/pkg/util/logging"
 )
 
 const (
@@ -81,17 +79,11 @@ const (
 // The returned condition (if not-nil) will have status, reason and message set. The other fields
 // should be added by the caller.
 // The returned err is for informational purpose only and the caller should not abort on non-nil error.
-func handleNotJoinedCluster( +func (c *FederatedClusterController) handleNotJoinedCluster( ctx context.Context, cluster *fedcorev1a1.FederatedCluster, - client fedclient.Interface, - kubeClient kubeclient.Interface, - eventRecorder record.EventRecorder, - fedSystemNamespace string, - clusterJoinTimeout time.Duration, -) (c *fedcorev1a1.FederatedCluster, condition *fedcorev1a1.ClusterCondition, joinPerformed *bool, err error) { - logger := klog.FromContext(ctx).WithValues("process", "cluster-join") - ctx = klog.NewContext(ctx, logger) +) (updated *fedcorev1a1.FederatedCluster, condition *fedcorev1a1.ClusterCondition, joinPerformed *bool, err error) { + logger := klog.FromContext(ctx) joinedCondition := getClusterCondition(&cluster.Status, fedcorev1a1.ClusterJoined) @@ -99,10 +91,10 @@ func handleNotJoinedCluster( if joinedCondition != nil && joinedCondition.Status == corev1.ConditionFalse && - time.Since(joinedCondition.LastTransitionTime.Time) > clusterJoinTimeout { - // join timed out + time.Since(joinedCondition.LastTransitionTime.Time) > c.clusterJoinTimeout { + // Join timed out logger.Error(nil, "Cluster join timed out") - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, corev1.EventTypeWarning, EventReasonJoinClusterTimeoutExceeded, @@ -117,11 +109,11 @@ func handleNotJoinedCluster( // 2. The remaining steps require a cluster kube client, attempt to create one - _, clusterKubeClient, err := getClusterClient(ctx, kubeClient, fedSystemNamespace, cluster) + _, clusterKubeClient, err := c.getClusterClient(ctx, cluster) if err != nil { logger.Error(err, "Failed to create cluster client") msg := fmt.Sprintf("Failed to create cluster client: %v", err.Error()) - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, corev1.EventTypeWarning, EventReasonJoinClusterError, msg, ) @@ -134,13 +126,15 @@ func handleNotJoinedCluster( // 3. Get or create system namespace in the cluster, this will also tell us if the cluster is unjoinable - logger.V(2).WithValues("fed-system-namespace", fedSystemNamespace).Info("Get system namespace in cluster") - memberFedNamespace, err := clusterKubeClient.CoreV1().Namespaces().Get(ctx, fedSystemNamespace, metav1.GetOptions{ResourceVersion: "0"}) + ctx, logger = logging.InjectLoggerValues(ctx, "fed-system-namespace", c.fedSystemNamespace) + + logger.V(2).Info("Get system namespace in cluster") + memberFedNamespace, err := clusterKubeClient.CoreV1().Namespaces().Get(ctx, c.fedSystemNamespace, metav1.GetOptions{ResourceVersion: "0"}) if err != nil { if !apierrors.IsNotFound(err) { msg := fmt.Sprintf("Failed to get namespace: %v", err.Error()) logger.Error(err, "Failed to get namespace") - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, corev1.EventTypeWarning, EventReasonJoinClusterError, msg, ) @@ -156,12 +150,14 @@ func handleNotJoinedCluster( } if memberFedNamespace != nil && memberFedNamespace.Annotations[FederatedClusterUID] != string(cluster.UID) { - // ns exists and is not created by us - the cluster is managed by another control plane + // Namespace exists and is not created by us - the cluster is managed by another control plane. 
msg := "Cluster is unjoinable (check if cluster is already joined to another federation)" logger.Error(nil, msg, "UID", memberFedNamespace.Annotations[FederatedClusterUID], "clusterUID", string(cluster.UID)) - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, - corev1.EventTypeWarning, EventReasonClusterUnjoinable, msg, + corev1.EventTypeWarning, + EventReasonClusterUnjoinable, + msg, ) return cluster, &fedcorev1a1.ClusterCondition{ Status: corev1.ConditionFalse, @@ -180,19 +176,19 @@ func handleNotJoinedCluster( if memberFedNamespace == nil { memberFedNamespace = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: fedSystemNamespace, + Name: c.fedSystemNamespace, Annotations: map[string]string{ FederatedClusterUID: string(cluster.UID), }, }, } - logger.V(1).WithValues("fed-system-namespace", fedSystemNamespace).Info("Create system namespace in cluster") + logger.V(1).Info("Create system namespace in cluster") memberFedNamespace, err = clusterKubeClient.CoreV1().Namespaces().Create(ctx, memberFedNamespace, metav1.CreateOptions{}) if err != nil { msg := fmt.Sprintf("Failed to create system namespace: %v", err.Error()) logger.Error(err, "Failed to create system namespace") - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, corev1.EventTypeWarning, EventReasonJoinClusterError, msg, ) @@ -208,12 +204,12 @@ func handleNotJoinedCluster( if cluster.Spec.UseServiceAccountToken { logger.V(2).Info("Get and save cluster token") - err = getAndSaveClusterToken(ctx, cluster, kubeClient, clusterKubeClient, fedSystemNamespace, memberFedNamespace) + err = c.getAndSaveClusterToken(ctx, cluster, clusterKubeClient, memberFedNamespace) if err != nil { msg := fmt.Sprintf("Failed to get and save cluster token: %v", err.Error()) logger.Error(err, "Failed to get and save cluster token") - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, corev1.EventTypeWarning, EventReasonJoinClusterError, msg, ) @@ -228,9 +224,11 @@ func handleNotJoinedCluster( // 5. 
Cluster is joined, update condition logger.V(2).Info("Cluster joined successfully") - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, - corev1.EventTypeNormal, EventReasonJoinClusterSuccess, "Cluster joined successfully", + corev1.EventTypeNormal, + EventReasonJoinClusterSuccess, + "Cluster joined successfully", ) return cluster, &fedcorev1a1.ClusterCondition{ Status: corev1.ConditionTrue, @@ -239,12 +237,10 @@ func handleNotJoinedCluster( }, joinPerformed, nil } -func getAndSaveClusterToken( +func (c *FederatedClusterController) getAndSaveClusterToken( ctx context.Context, cluster *fedcorev1a1.FederatedCluster, - kubeClient kubeclient.Interface, - clusterKubeClient kubeclient.Interface, - fedSystemNamespace string, + clusterKubeClient kubernetes.Interface, memberSystemNamespace *corev1.Namespace, ) error { logger := klog.FromContext(ctx) @@ -262,13 +258,17 @@ func getAndSaveClusterToken( } err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { - secret, err := kubeClient.CoreV1().Secrets(fedSystemNamespace).Get(ctx, cluster.Spec.SecretRef.Name, metav1.GetOptions{}) + secret, err := c.kubeClient.CoreV1().Secrets(c.fedSystemNamespace).Get( + ctx, + cluster.Spec.SecretRef.Name, + metav1.GetOptions{}, + ) if err != nil { return err } secret.Data[ServiceAccountTokenKey] = token secret.Data[ServiceAccountCAKey] = ca - _, err = kubeClient.CoreV1().Secrets(fedSystemNamespace).Update(ctx, secret, metav1.UpdateOptions{}) + _, err = c.kubeClient.CoreV1().Secrets(c.fedSystemNamespace).Update(ctx, secret, metav1.UpdateOptions{}) return err }) if err != nil { @@ -286,22 +286,29 @@ func getAndSaveClusterToken( // resources in the joining cluster. The created secret name is returned on success. func createAuthorizedServiceAccount( ctx context.Context, - clusterKubeClient kubeclient.Interface, + clusterKubeClient kubernetes.Interface, memberSystemNamespace *corev1.Namespace, clusterName string, errorOnExisting bool, ) (string, error) { - logger := klog.FromContext(ctx).WithValues("member-service-account-name", MemberServiceAccountName) - ctx = klog.NewContext(ctx, logger) + ctx, logger := logging.InjectLoggerValues(ctx, "member-service-account-name", MemberServiceAccountName) // 1. create service account + logger.V(1).Info("Creating service account") - err := createServiceAccount(ctx, clusterKubeClient, memberSystemNamespace.Name, MemberServiceAccountName, clusterName, errorOnExisting) - if err != nil { + if err := createServiceAccount( + ctx, + clusterKubeClient, + memberSystemNamespace.Name, + MemberServiceAccountName, + clusterName, + errorOnExisting, + ); err != nil { return "", fmt.Errorf("failed to create service account %s: %w", MemberServiceAccountName, err) } // 2. create service account token secret + logger.V(1).Info("Creating service account token secret") saTokenSecretName, err := createServiceAccountTokenSecret( ctx, @@ -314,12 +321,21 @@ func createAuthorizedServiceAccount( if err != nil { return "", fmt.Errorf("error creating service account token secret %s : %w", MemberServiceAccountName, err) } - logger.V(1).WithValues("sa-token-secret-name", saTokenSecretName).Info("Created service account token secret for service account") + + ctx, logger = logging.InjectLoggerValues(ctx, "sa-token-secret-name", saTokenSecretName) + logger.V(1).Info("Created service account token secret for service account") // 3. 
create rbac + logger.V(1).Info("Creating RBAC for service account") - err = createClusterRoleAndBinding(ctx, clusterKubeClient, memberSystemNamespace, MemberServiceAccountName, clusterName, errorOnExisting) - if err != nil { + if err = createClusterRoleAndBinding( + ctx, + clusterKubeClient, + memberSystemNamespace, + MemberServiceAccountName, + clusterName, + errorOnExisting, + ); err != nil { return "", fmt.Errorf("error creating cluster role and binding for service account %s: %w", MemberServiceAccountName, err) } @@ -331,8 +347,9 @@ func createAuthorizedServiceAccount( // to access its API server. func createServiceAccount( ctx context.Context, - clusterClientset kubeclient.Interface, - namespace, saName, joiningClusterName string, errorOnExisting bool, + clusterClientset kubernetes.Interface, + namespace, saName, joiningClusterName string, + errorOnExisting bool, ) error { sa := &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -367,7 +384,7 @@ func createServiceAccount( // to access its API server. func createServiceAccountTokenSecret( ctx context.Context, - clusterClientset kubeclient.Interface, + clusterClientset kubernetes.Interface, namespace, saName, joiningClusterName string, errorOnExisting bool, ) (string, error) { @@ -421,7 +438,7 @@ func bindingSubjects(saName, namespace string) []rbacv1.Subject { // with clientset. func createClusterRoleAndBinding( ctx context.Context, - clientset kubeclient.Interface, + clientset kubernetes.Interface, namespace *corev1.Namespace, saName, clusterName string, errorOnExisting bool, @@ -551,7 +568,7 @@ func createClusterRoleAndBinding( func getServiceAccountToken( ctx context.Context, - clusterClientset kubeclient.Interface, + clusterClientset kubernetes.Interface, memberSystemNamespace, secretName string, ) ([]byte, []byte, error) { // Get the secret from the joining cluster. diff --git a/pkg/controllers/federatedcluster/clusterstatus.go b/pkg/controllers/federatedcluster/clusterstatus.go index 87adca6f..80c6fa8f 100644 --- a/pkg/controllers/federatedcluster/clusterstatus.go +++ b/pkg/controllers/federatedcluster/clusterstatus.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2016 The Kubernetes Authors. 
@@ -33,14 +32,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/discovery"
-	"k8s.io/client-go/informers"
-	"k8s.io/client-go/tools/cache"
+	corev1listers "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
-	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
-	"github.com/kubewharf/kubeadmiral/pkg/controllers/util/federatedclient"
 )
 
 const (
@@ -62,32 +58,38 @@ const (
 	ClusterNotReachableMsg = "Cluster is not reachable"
 )
 
-func collectIndividualClusterStatus(
+func (c *FederatedClusterController) collectIndividualClusterStatus(
 	ctx context.Context,
 	cluster *fedcorev1a1.FederatedCluster,
-	fedClient fedclient.Interface,
-	federatedClient federatedclient.FederatedClientFactory,
-) error {
+) (retryAfter time.Duration, err error) {
 	logger := klog.FromContext(ctx)
 
-	clusterKubeClient, exists, err := federatedClient.KubeClientsetForCluster(cluster.Name)
+	clusterKubeClient, exists := c.federatedInformerManager.GetClusterKubeClient(cluster.Name)
 	if !exists {
-		return fmt.Errorf("federated client is not yet up to date")
+		return 0, fmt.Errorf("failed to get cluster client: FederatedInformerManager not yet up-to-date")
 	}
-	if err != nil {
-		return fmt.Errorf("failed to get federated kube client: %w", err)
+
+	podLister, podsSynced, exists := c.federatedInformerManager.GetPodLister(cluster.Name)
+	if !exists {
+		return 0, fmt.Errorf("failed to get pod lister: FederatedInformerManager not yet up-to-date")
+	}
+	if !podsSynced() {
+		logger.V(3).Info("Pod informer not synced, will reenqueue")
+		return 100 * time.Millisecond, nil
 	}
-	clusterKubeInformer, exists, err := federatedClient.KubeSharedInformerFactoryForCluster(cluster.Name)
+
+	nodeLister, nodesSynced, exists := c.federatedInformerManager.GetNodeLister(cluster.Name)
 	if !exists {
-		return fmt.Errorf("federated client is not yet up to date")
+		return 0, fmt.Errorf("failed to get node lister: FederatedInformerManager not yet up-to-date")
 	}
-	if err != nil {
-		return fmt.Errorf("failed to get federated kube informer factory: %w", err)
+	if !nodesSynced() {
+		logger.V(3).Info("Node informer not synced, will reenqueue")
+		return 100 * time.Millisecond, nil
 	}
 
 	discoveryClient := clusterKubeClient.Discovery()
 
-	cluster = cluster.DeepCopy()
+	cluster = cluster.DeepCopy()
+	conditionTime := metav1.Now()
 
 	offlineStatus, readyStatus := checkReadyByHealthz(ctx, discoveryClient)
@@ -104,9 +106,9 @@ func collectIndividualClusterStatus(
 		readyMessage = ClusterNotReachableMsg
 	}
 
-	// we skip updating cluster resources and api resources if cluster is not ready
+	// We skip updating cluster resources and api resources if cluster is not ready
 	if readyStatus == corev1.ConditionTrue {
-		if err := updateClusterResources(ctx, &cluster.Status, clusterKubeInformer); err != nil {
+		if err := updateClusterResources(ctx, &cluster.Status, podLister, nodeLister); err != nil {
 			logger.Error(err, "Failed to update cluster resources")
 			readyStatus = corev1.ConditionFalse
 			readyReason = ClusterResourceCollectionFailedReason
@@ -125,18 +127,26 @@ func collectIndividualClusterStatus(
 	setClusterCondition(&cluster.Status, &readyCondition)
 
 	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
-		latestCluster, err := fedClient.CoreV1alpha1().FederatedClusters().Get(context.TODO(), cluster.Name, metav1.GetOptions{})
+		latestCluster, err := c.fedClient.CoreV1alpha1().FederatedClusters().Get(
context.TODO(), + cluster.Name, + metav1.GetOptions{}, + ) if err != nil { return err } cluster.Status.DeepCopyInto(&latestCluster.Status) - _, err = fedClient.CoreV1alpha1().FederatedClusters().UpdateStatus(context.TODO(), latestCluster, metav1.UpdateOptions{}) + _, err = c.fedClient.CoreV1alpha1().FederatedClusters().UpdateStatus( + context.TODO(), + latestCluster, + metav1.UpdateOptions{}, + ) return err }); err != nil { - return fmt.Errorf("failed to update cluster status: %w", err) + return 0, fmt.Errorf("failed to update cluster status: %w", err) } - return nil + return 0, nil } func checkReadyByHealthz( @@ -163,19 +173,9 @@ func checkReadyByHealthz( func updateClusterResources( ctx context.Context, clusterStatus *fedcorev1a1.FederatedClusterStatus, - clusterKubeInformer informers.SharedInformerFactory, + podLister corev1listers.PodLister, + nodeLister corev1listers.NodeLister, ) error { - podLister := clusterKubeInformer.Core().V1().Pods().Lister() - podsSynced := clusterKubeInformer.Core().V1().Pods().Informer().HasSynced - nodeLister := clusterKubeInformer.Core().V1().Nodes().Lister() - nodesSynced := clusterKubeInformer.Core().V1().Nodes().Informer().HasSynced - - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - if !cache.WaitForNamedCacheSync("federated-cluster-controller-status-collect", ctx.Done(), podsSynced, nodesSynced) { - return fmt.Errorf("timeout waiting for node and pod informer sync") - } - nodes, err := nodeLister.List(labels.Everything()) if err != nil { return fmt.Errorf("failed to list nodes: %w", err) diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index d0c999ba..9be0041d 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2016 The Kubernetes Authors. 
@@ -24,14 +23,13 @@ package federatedcluster import ( "context" "fmt" - "reflect" "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" kubeclient "k8s.io/client-go/kubernetes" @@ -45,18 +43,18 @@ import ( fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" genscheme "github.com/kubewharf/kubeadmiral/pkg/client/generic/scheme" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" - fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/federatedclient" - finalizerutils "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( - ControllerName = "federated-cluster-controller" + FederatedClusterControllerName = "federated-cluster-controller" FinalizerFederatedClusterController = common.DefaultPrefix + "federated-cluster-controller" @@ -71,128 +69,145 @@ type ClusterHealthCheckConfig struct { // FederatedClusterController reconciles a FederatedCluster object type FederatedClusterController struct { - client fedclient.Interface - kubeClient kubeclient.Interface + clusterInformer fedcorev1a1informers.FederatedClusterInformer + federatedInformerManager informermanager.FederatedInformerManager - clusterLister fedcorev1a1listers.FederatedClusterLister - clusterSynced cache.InformerSynced - federatedClient federatedclient.FederatedClientFactory + kubeClient kubeclient.Interface + fedClient fedclient.Interface fedSystemNamespace string clusterHealthCheckConfig *ClusterHealthCheckConfig clusterJoinTimeout time.Duration - eventRecorder record.EventRecorder - metrics stats.Metrics - logger klog.Logger + worker worker.ReconcileWorker[common.QualifiedName] + statusCollectWorker worker.ReconcileWorker[common.QualifiedName] + eventRecorder record.EventRecorder - worker worker.ReconcileWorker - statusCollectWorker worker.ReconcileWorker + metrics stats.Metrics + logger klog.Logger } func NewFederatedClusterController( - client fedclient.Interface, kubeClient kubeclient.Interface, - informer fedcorev1a1informers.FederatedClusterInformer, - federatedClient federatedclient.FederatedClientFactory, + fedClient fedclient.Interface, + clusterInformer fedcorev1a1informers.FederatedClusterInformer, + federatedInformerManager informermanager.FederatedInformerManager, metrics stats.Metrics, - fedsystemNamespace string, - restConfig *rest.Config, - workerCount int, + logger klog.Logger, clusterJoinTimeout time.Duration, + workerCount int, + fedsystemNamespace string, ) (*FederatedClusterController, error) { c := 
&FederatedClusterController{
-		client:             client,
-		kubeClient:         kubeClient,
-		clusterLister:      informer.Lister(),
-		clusterSynced:      informer.Informer().HasSynced,
-		federatedClient:    federatedClient,
-		fedSystemNamespace: fedsystemNamespace,
+		clusterInformer:          clusterInformer,
+		federatedInformerManager: federatedInformerManager,
+		kubeClient:               kubeClient,
+		fedClient:                fedClient,
+		fedSystemNamespace:       fedsystemNamespace,
 		clusterHealthCheckConfig: &ClusterHealthCheckConfig{
 			Period: time.Minute,
 		},
 		clusterJoinTimeout: clusterJoinTimeout,
 		metrics:            metrics,
-		logger:             klog.LoggerWithValues(klog.Background(), "controller", ControllerName),
+		logger:             logger.WithValues("controller", FederatedClusterControllerName),
 	}
 
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartRecordingToSink(
 		&corev1client.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")},
 	)
-	broadcaster.StartLogging(klog.V(4).Infof)
+	broadcaster.StartLogging(klog.V(6).Infof)
 	c.eventRecorder = broadcaster.NewRecorder(
 		genscheme.Scheme,
 		corev1.EventSource{Component: ("federatedcluster-controller")},
 	)
 
-	c.worker = worker.NewReconcileWorker(
+	c.worker = worker.NewReconcileWorker[common.QualifiedName](
+		FederatedClusterControllerName,
+		nil,
 		c.reconcile,
 		worker.RateLimiterOptions{},
 		workerCount,
 		metrics,
-		delayingdeliver.NewMetricTags("federatedcluster-worker", "FederatedCluster"),
 	)
 
-	c.statusCollectWorker = worker.NewReconcileWorker(
+	c.statusCollectWorker = worker.NewReconcileWorker[common.QualifiedName](
+		FederatedClusterControllerName,
+		nil,
 		c.collectClusterStatus,
 		worker.RateLimiterOptions{
 			InitialDelay: 50 * time.Millisecond,
 		},
 		workerCount,
 		metrics,
-		delayingdeliver.NewMetricTags("federatedcluster-status-collect-worker", "FederatedCluster"),
 	)
 
-	informer.Informer().
-		AddEventHandler(util.NewTriggerOnGenerationAndMetadataChanges(c.worker.EnqueueObject,
-			func(oldMeta, newMeta metav1.Object) bool {
-				if !reflect.DeepEqual(oldMeta.GetAnnotations(), newMeta.GetAnnotations()) ||
-					!reflect.DeepEqual(oldMeta.GetFinalizers(), newMeta.GetFinalizers()) {
-					return true
-				}
-				return false
-			}))
+	clusterInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnChanges(
+		func(old metav1.Object, cur metav1.Object) bool {
+			if old.GetGeneration() != cur.GetGeneration() {
+				return true
+			}
+			if !equality.Semantic.DeepEqual(old.GetAnnotations(), cur.GetAnnotations()) {
+				return true
+			}
+			if !equality.Semantic.DeepEqual(old.GetFinalizers(), cur.GetFinalizers()) {
+				return true
+			}
+			return false
+		},
+		common.NewQualifiedName,
+		c.worker.Enqueue,
+	))
 
 	return c, nil
 }
 
+func (c *FederatedClusterController) HasSynced() bool {
+	return c.clusterInformer.Informer().HasSynced()
+}
+
 func (c *FederatedClusterController) IsControllerReady() bool {
-	return c.clusterSynced()
+	return c.HasSynced()
 }
 
 func (c *FederatedClusterController) Run(ctx context.Context) {
-	defer utilruntime.HandleCrash()
+	ctx, logger := logging.InjectLogger(ctx, c.logger)
 
-	c.logger.Info("Starting controller")
-	defer c.logger.Info("Stopping controller")
+	logger.Info("Starting controller")
+	defer logger.Info("Stopping controller")
 
-	if !cache.WaitForNamedCacheSync("federated-controller", ctx.Done(), c.clusterSynced) {
+	if !cache.WaitForNamedCacheSync("federated-controller", ctx.Done(), c.HasSynced) {
+		logger.Error(nil, "Timed out waiting for cache sync")
 		return
 	}
 
-	c.worker.Run(ctx.Done())
-	c.statusCollectWorker.Run(ctx.Done())
+	logger.Info("Caches are synced")
+
+	c.worker.Run(ctx)
+	c.statusCollectWorker.Run(ctx)
 
-	// periodically enqueue all clusters to trigger status collection
+	// Periodically enqueue all clusters to trigger status collection.
 	go wait.Until(c.enqueueAllJoinedClusters, c.clusterHealthCheckConfig.Period, ctx.Done())
 
 	<-ctx.Done()
 }
 
-func (c *FederatedClusterController) reconcile(qualifiedName common.QualifiedName) (status worker.Result) {
+func (c *FederatedClusterController) reconcile(
+	ctx context.Context,
+	key common.QualifiedName,
+) (status worker.Result) {
 	_ = c.metrics.Rate("federated-cluster-controller.throughput", 1)
-	logger := c.logger.WithValues("control-loop", "reconcile", "object", qualifiedName.String())
-	ctx := klog.NewContext(context.TODO(), logger)
+	ctx, logger := logging.InjectLoggerValues(ctx, "cluster", key.String())
+
 	startTime := time.Now()
 
 	logger.V(3).Info("Starting reconcile")
-	defer c.metrics.Duration("federated-cluster-controller.latency", startTime)
 	defer func() {
+		c.metrics.Duration(fmt.Sprintf("%s.latency", FederatedClusterControllerName), startTime)
 		logger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcile")
 	}()
 
-	cluster, err := c.clusterLister.Get(qualifiedName.Name)
+	cluster, err := c.clusterInformer.Lister().Get(key.Name)
 	if err != nil && apierrors.IsNotFound(err) {
 		logger.V(3).Info("Observed cluster deletion")
 		return worker.StatusAllOK
@@ -201,19 +216,12 @@ func (c *FederatedClusterController) reconcile(qualifiedName common.QualifiedNam
 		logger.Error(err, "Failed to get cluster from store")
 		return worker.StatusError
 	}
+
 	cluster = cluster.DeepCopy()
 
 	if cluster.GetDeletionTimestamp() != nil {
 		logger.V(2).Info("Handle terminating cluster")
-		err := handleTerminatingCluster(
-			ctx,
-			cluster,
-			c.client,
-			c.kubeClient,
-			c.eventRecorder,
-			c.fedSystemNamespace,
-		)
-		if err != nil {
+		if err := c.handleTerminatingCluster(ctx, cluster); err != nil {
 			if apierrors.IsConflict(err) {
 				return worker.StatusConflict
 			}
@@ -223,9 +231,8 @@ func (c *FederatedClusterController) reconcile(qualifiedName common.QualifiedNam
 		return worker.StatusAllOK
 	}
 
-	if cluster, err = ensureFinalizer(ctx, cluster, c.client); err != nil {
+	if cluster, err = c.ensureFinalizer(ctx, cluster); err != nil {
 		if apierrors.IsConflict(err) {
-			// Ignore IsConflict errors because we will retry on the next reconcile
 			return worker.StatusConflict
 		}
 		logger.Error(err, "Failed to ensure cluster finalizer")
@@ -236,17 +243,9 @@ func (c *FederatedClusterController) reconcile(qualifiedName common.QualifiedNam
 		return worker.StatusAllOK
 	}
 
-	// not joined yet and not failed, so we try to join
+	// Not joined yet and not failed, so we try to join
 	logger.V(2).Info("Handle unjoined cluster")
-	cluster, newCondition, newJoinPerformed, err := handleNotJoinedCluster(
-		ctx,
-		cluster,
-		c.client,
-		c.kubeClient,
-		c.eventRecorder,
-		c.fedSystemNamespace,
-		c.clusterJoinTimeout,
-	)
+	cluster, newCondition, newJoinPerformed, err := c.handleNotJoinedCluster(ctx, cluster)
 
 	needsUpdate := false
 	if newCondition != nil {
@@ -256,8 +255,7 @@ func (c *FederatedClusterController) reconcile(qualifiedName common.QualifiedNam
 		newCondition.Type = fedcorev1a1.ClusterJoined
 		newCondition.LastProbeTime = currentTime
 
-		// The condition's last transition time is updated to the current time only if
-		// the status has changed.
+		// The condition's last transition time is updated to the current time only if the status has changed.
oldCondition := getClusterCondition(&cluster.Status, fedcorev1a1.ClusterJoined) if oldCondition != nil && oldCondition.Status == newCondition.Status { newCondition.LastTransitionTime = oldCondition.LastTransitionTime @@ -274,8 +272,10 @@ func (c *FederatedClusterController) reconcile(qualifiedName common.QualifiedNam if needsUpdate { var updateErr error - if cluster, updateErr = c.client.CoreV1alpha1().FederatedClusters().UpdateStatus( - ctx, cluster, metav1.UpdateOptions{}, + if cluster, updateErr = c.fedClient.CoreV1alpha1().FederatedClusters().UpdateStatus( + ctx, + cluster, + metav1.UpdateOptions{}, ); updateErr != nil { logger.Error(updateErr, "Failed to update cluster status") err = updateErr @@ -289,7 +289,7 @@ func (c *FederatedClusterController) reconcile(qualifiedName common.QualifiedNam return worker.StatusError } - // trigger initial status collection if successfully joined + // Trigger initial status collection if successfully joined if joined, alreadyFailed := isClusterJoined(&cluster.Status); joined && !alreadyFailed { c.statusCollectWorker.EnqueueObject(cluster) } @@ -297,17 +297,26 @@ func (c *FederatedClusterController) reconcile(qualifiedName common.QualifiedNam return worker.StatusAllOK } -func (c *FederatedClusterController) collectClusterStatus(qualifiedName common.QualifiedName) (status worker.Result) { - logger := c.logger.WithValues("control-loop", "status-collect", "object", qualifiedName.String()) - ctx := klog.NewContext(context.TODO(), logger) +func (c *FederatedClusterController) collectClusterStatus( + ctx context.Context, + key common.QualifiedName, +) (status worker.Result) { + ctx, logger := logging.InjectLoggerValues(ctx, "cluster", key.String(), "worker", "status-collect") + startTime := time.Now() - logger.V(3).Info("Start status collection") + logger.V(3).Info("Starting to collect cluster status") defer func() { - logger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished status collection") + c.metrics.Duration(fmt.Sprintf("%s.status-collect.latency", FederatedClusterControllerName), startTime) + logger.WithValues( + "duration", + time.Since(startTime), + "status", + status.String(), + ).V(3).Info("Finished collecting cluster status") }() - cluster, err := c.clusterLister.Get(qualifiedName.Name) + cluster, err := c.clusterInformer.Lister().Get(key.Name) if err != nil && apierrors.IsNotFound(err) { logger.V(3).Info("Observed cluster deletion") return worker.StatusAllOK @@ -318,80 +327,77 @@ func (c *FederatedClusterController) collectClusterStatus(qualifiedName common.Q } cluster = cluster.DeepCopy() + if shouldCollectClusterStatus(cluster, c.clusterHealthCheckConfig.Period) { - if err := collectIndividualClusterStatus(ctx, cluster, c.client, c.federatedClient); err != nil { + retryAfter, err := c.collectIndividualClusterStatus(ctx, cluster) + if err != nil { logger.Error(err, "Failed to collect cluster status") return worker.StatusError } + if retryAfter > 0 { + return worker.Result{ + Success: true, + RequeueAfter: &retryAfter, + Backoff: false, + } + } } return worker.StatusAllOK } -func ensureFinalizer( +func (c *FederatedClusterController) ensureFinalizer( ctx context.Context, cluster *fedcorev1a1.FederatedCluster, - client fedclient.Interface, ) (*fedcorev1a1.FederatedCluster, error) { - updated, err := finalizerutils.AddFinalizers( - cluster, - sets.NewString(FinalizerFederatedClusterController), - ) + updated, err := finalizers.AddFinalizers(cluster, sets.NewString(FinalizerFederatedClusterController)) 
if err != nil { return nil, err } if updated { - return client.CoreV1alpha1(). - FederatedClusters(). - Update(ctx, cluster, metav1.UpdateOptions{}) + return c.fedClient.CoreV1alpha1().FederatedClusters().Update(ctx, cluster, metav1.UpdateOptions{}) } return cluster, nil } -func handleTerminatingCluster( +func (c *FederatedClusterController) handleTerminatingCluster( ctx context.Context, cluster *fedcorev1a1.FederatedCluster, - client fedclient.Interface, - kubeClient kubeclient.Interface, - eventRecorder record.EventRecorder, - fedSystemNamespace string, ) error { finalizers := sets.New(cluster.GetFinalizers()...) if !finalizers.Has(FinalizerFederatedClusterController) { return nil } - // we need to ensure that all other finalizers are removed before we start cleanup as other controllers may - // still rely on the credentials to do their own cleanup + // We need to ensure that all other finalizers are removed before we start cleanup as other controllers may still + // rely on the service account credentials to do their own cleanup. if len(finalizers) > 1 { - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, corev1.EventTypeNormal, EventReasonHandleTerminatingClusterBlocked, - "waiting for other finalizers to be cleaned up", + "Waiting for other finalizers to be cleaned up", ) - return nil } // Only perform clean-up if we made any effectual changes to the cluster during join. if cluster.Status.JoinPerformed { - clusterSecret, clusterKubeClient, err := getClusterClient(ctx, kubeClient, fedSystemNamespace, cluster) + clusterSecret, clusterKubeClient, err := c.getClusterClient(ctx, cluster) if err != nil { - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, corev1.EventTypeWarning, EventReasonHandleTerminatingClusterFailed, "Failed to get cluster client: %v", err, ) - return fmt.Errorf("failed to get cluster client: %w", err) } - // 1. cleanup service account token from cluster secret if required + // 1. cleanup service account token from cluster secret if required. if cluster.Spec.UseServiceAccountToken { var err error @@ -399,32 +405,32 @@ func handleTerminatingCluster( _, tokenKeyExists := clusterSecret.Data[common.ClusterServiceAccountTokenKey] _, caExists := clusterSecret.Data[common.ClusterServiceAccountCAKey] if tokenKeyExists || caExists { - delete(clusterSecret.Data, ServiceAccountTokenKey) - delete(clusterSecret.Data, ServiceAccountCAKey) - - _, err = kubeClient.CoreV1(). - Secrets(fedSystemNamespace). - Update(ctx, clusterSecret, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf( - "failed to remove service account info from cluster secret: %w", - err, - ) + delete(clusterSecret.Data, common.ClusterServiceAccountTokenKey) + delete(clusterSecret.Data, common.ClusterServiceAccountCAKey) + + if _, err = c.kubeClient.CoreV1().Secrets(c.fedSystemNamespace).Update( + ctx, + clusterSecret, + metav1.UpdateOptions{}, + ); err != nil { + return fmt.Errorf("failed to remove service account info from cluster secret: %w", err) } } } // 2. connect to cluster and perform cleanup - err = clusterKubeClient.CoreV1(). - Namespaces(). 
- Delete(ctx, fedSystemNamespace, metav1.DeleteOptions{}) + err = clusterKubeClient.CoreV1().Namespaces().Delete( + ctx, + c.fedSystemNamespace, + metav1.DeleteOptions{}, + ) if err != nil && !apierrors.IsNotFound(err) { - eventRecorder.Eventf( + c.eventRecorder.Eventf( cluster, corev1.EventTypeWarning, EventReasonHandleTerminatingClusterFailed, - "delete system namespace from cluster %q failed: %v, will retry later", + "Delete system namespace from cluster %q failed: %v, will retry later", cluster.Name, err, ) @@ -432,22 +438,21 @@ func handleTerminatingCluster( } } - // we have already checked that we are the last finalizer so we can simply set finalizers to be empty + // We have already checked that we are the last finalizer so we can simply set finalizers to be empty. cluster.SetFinalizers(nil) - _, err := client.CoreV1alpha1(). - FederatedClusters(). - Update(ctx, cluster, metav1.UpdateOptions{}) - if err != nil { + if _, err := c.fedClient.CoreV1alpha1().FederatedClusters().Update( + ctx, + cluster, + metav1.UpdateOptions{}, + ); err != nil { return fmt.Errorf("failed to update cluster for finalizer removal: %w", err) } return nil } -func getClusterClient( +func (c *FederatedClusterController) getClusterClient( ctx context.Context, - hostClient kubeclient.Interface, - fedSystemNamespace string, cluster *fedcorev1a1.FederatedCluster, ) (*corev1.Secret, kubeclient.Interface, error) { restConfig := &rest.Config{Host: cluster.Spec.APIEndpoint} @@ -457,12 +462,16 @@ func getClusterClient( return nil, nil, fmt.Errorf("cluster secret is not set") } - clusterSecret, err := hostClient.CoreV1().Secrets(fedSystemNamespace).Get(ctx, clusterSecretName, metav1.GetOptions{}) + clusterSecret, err := c.kubeClient.CoreV1().Secrets(c.fedSystemNamespace).Get( + ctx, + clusterSecretName, + metav1.GetOptions{}, + ) if err != nil { return nil, nil, fmt.Errorf("failed to get cluster secret: %w", err) } - if err := util.PopulateAuthDetailsFromSecret(restConfig, cluster.Spec.Insecure, clusterSecret, false); err != nil { + if err := clusterutil.PopulateAuthDetailsFromSecret(restConfig, cluster.Spec.Insecure, clusterSecret, false); err != nil { return nil, nil, fmt.Errorf("cluster secret malformed: %w", err) } @@ -475,13 +484,13 @@ func getClusterClient( } func (c *FederatedClusterController) enqueueAllJoinedClusters() { - clusters, err := c.clusterLister.List(labels.Everything()) + clusters, err := c.clusterInformer.Lister().List(labels.Everything()) if err != nil { c.logger.Error(err, "Failed to enqueue all clusters") } for _, cluster := range clusters { - if util.IsClusterJoined(&cluster.Status) { + if clusterutil.IsClusterJoined(&cluster.Status) { c.statusCollectWorker.EnqueueObject(cluster) } } diff --git a/pkg/controllers/federatedcluster/util.go b/pkg/controllers/federatedcluster/util.go index 32ff9a95..e21ab7ae 100644 --- a/pkg/controllers/federatedcluster/util.go +++ b/pkg/controllers/federatedcluster/util.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
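Aside: the refactored status collection above changes collectIndividualClusterStatus to return (retryAfter time.Duration, err error), which collectClusterStatus translates into a delayed, non-backoff requeue instead of a hard failure when member-cluster listers are not yet synced. A minimal, self-contained Go sketch of that contract follows; the Result type here only mirrors the worker.Result fields visible in these diffs, and collect is a hypothetical stand-in for collectIndividualClusterStatus, not the actual pkg/util/worker implementation:

package main

import (
	"fmt"
	"time"
)

// Result mirrors the subset of worker.Result fields used in the diffs above
// (assumption: the real type in pkg/util/worker carries additional behavior).
type Result struct {
	Success      bool
	RequeueAfter *time.Duration
	Backoff      bool
}

// collect stands in for collectIndividualClusterStatus: a positive retryAfter
// with a nil error means "listers not synced yet, retry shortly" rather than
// a hard failure.
func collect(synced bool) (time.Duration, error) {
	if !synced {
		return 100 * time.Millisecond, nil
	}
	return 0, nil
}

// reconcileOnce maps (retryAfter, err) onto a worker result the same way
// collectClusterStatus does in the patch: errors back off, a pending sync
// requeues after a fixed delay without backoff, and success requeues nothing.
func reconcileOnce(synced bool) Result {
	retryAfter, err := collect(synced)
	if err != nil {
		return Result{Success: false, Backoff: true}
	}
	if retryAfter > 0 {
		return Result{Success: true, RequeueAfter: &retryAfter, Backoff: false}
	}
	return Result{Success: true}
}

func main() {
	fmt.Printf("%+v\n", reconcileOnce(false)) // delayed requeue, no backoff
	fmt.Printf("%+v\n", reconcileOnce(true))  // all OK
}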
diff --git a/pkg/controllers/nsautoprop/controller.go b/pkg/controllers/nsautoprop/controller.go
index 52f36efe..ed138372 100644
--- a/pkg/controllers/nsautoprop/controller.go
+++ b/pkg/controllers/nsautoprop/controller.go
@@ -28,7 +28,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	corev1informers "k8s.io/client-go/informers/core/v1"
 	kubeclient "k8s.io/client-go/kubernetes"
@@ -133,22 +132,27 @@ func NewNamespaceAutoPropagationController(
 	)
 
 	if _, err := c.clusterFedObjectInformer.Informer().AddEventHandlerWithResyncPeriod(
-		eventhandlers.NewTriggerOnAllChanges(func(o runtime.Object) {
-			fedObj := o.(*fedcorev1a1.ClusterFederatedObject)
-			logger := c.logger.WithValues("cluster-federated-object", common.NewQualifiedName(fedObj))
-
-			srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured()
-			if err != nil {
-				logger.Error(err, "Failed to get source object's metadata from ClusterFederatedObject")
-				return
-			}
-
-			if srcMeta.GetKind() != common.NamespaceKind || !c.shouldBeAutoPropagated(srcMeta) {
-				return
-			}
-
-			c.worker.Enqueue(common.QualifiedName{Name: fedObj.GetName()})
-		}), util.NoResyncPeriod); err != nil {
+		eventhandlers.NewTriggerOnAllChanges(
+			func(obj *fedcorev1a1.ClusterFederatedObject) *fedcorev1a1.ClusterFederatedObject {
+				return obj
+			},
+			func(obj *fedcorev1a1.ClusterFederatedObject) {
+				srcMeta, err := obj.Spec.GetTemplateAsUnstructured()
+				if err != nil {
+					c.logger.Error(
+						err,
+						"Failed to get source object's metadata from ClusterFederatedObject",
+						"object",
+						common.NewQualifiedName(obj),
+					)
+					return
+				}
+				if srcMeta.GetKind() != common.NamespaceKind || !c.shouldBeAutoPropagated(srcMeta) {
+					return
+				}
+				c.worker.Enqueue(common.QualifiedName{Name: obj.GetName()})
+			},
+		), util.NoResyncPeriod); err != nil {
 		return nil, err
 	}
 
@@ -240,7 +244,9 @@ func (c *Controller) reconcile(ctx context.Context, qualifiedName common.Qualifi
 	}
 
 	if updated {
-		_, err = c.fedClient.CoreV1alpha1().ClusterFederatedObjects().Update(ctx, fedNamespace, metav1.UpdateOptions{})
+		_, err = c.fedClient.CoreV1alpha1().
+			ClusterFederatedObjects().
+ Update(ctx, fedNamespace, metav1.UpdateOptions{}) if err != nil { if apierrors.IsConflict(err) { return worker.StatusConflict @@ -355,7 +361,10 @@ func (c *Controller) shouldBeAutoPropagated(fedNamespace *unstructured.Unstructu return true } -func (c *Controller) ensureAnnotation(fedNamespace *fedcorev1a1.ClusterFederatedObject, key, value string) (bool, error) { +func (c *Controller) ensureAnnotation( + fedNamespace *fedcorev1a1.ClusterFederatedObject, + key, value string, +) (bool, error) { needsUpdate, err := annotationutil.AddAnnotation(fedNamespace, key, value) if err != nil { return false, fmt.Errorf( diff --git a/pkg/controllers/policyrc/controller.go b/pkg/controllers/policyrc/controller.go index dfd1ceca..869ef57e 100644 --- a/pkg/controllers/policyrc/controller.go +++ b/pkg/controllers/policyrc/controller.go @@ -22,7 +22,6 @@ import ( "time" apierrors "k8s.io/apimachinery/pkg/api/errors" - pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" @@ -33,8 +32,8 @@ import ( fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" "github.com/kubewharf/kubeadmiral/pkg/util/logging" "github.com/kubewharf/kubeadmiral/pkg/util/worker" @@ -71,7 +70,6 @@ func NewPolicyRCController( logger klog.Logger, workerCount int, ) (*Controller, error) { - c := &Controller{ client: generic.NewForConfigOrDie(restConfig), fedObjectInformer: fedInformerFactory.Core().V1alpha1().FederatedObjects(), @@ -84,15 +82,17 @@ func NewPolicyRCController( logger: logger.WithValues("controller", ControllerName), } - if _, err := c.fedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { - c.countWorker.Enqueue(common.NewQualifiedName(o)) - })); err != nil { + if _, err := c.fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + c.countWorker.Enqueue, + )); err != nil { return nil, err } - if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { - c.countWorker.Enqueue(common.NewQualifiedName(o)) - })); err != nil { + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + c.countWorker.Enqueue, + )); err != nil { return nil, err } @@ -141,27 +141,31 @@ func NewPolicyRCController( metrics, ) - persistPpWorkerTrigger := func(o pkgruntime.Object) { - c.persistPpWorker.Enqueue(common.NewQualifiedName(o)) - } - - if _, err := c.propagationPolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(persistPpWorkerTrigger)); err != nil { + if _, err := c.propagationPolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + c.persistPpWorker.Enqueue, + )); err != nil { return nil, err } - if _, err := c.clusterPropagationPolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(persistPpWorkerTrigger)); err != nil { + if _, err := c.clusterPropagationPolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + 
c.persistPpWorker.Enqueue,
+	)); err != nil {
 		return nil, err
 	}
 
-	persistOpWorkerTrigger := func(o pkgruntime.Object) {
-		c.persistOpWorker.Enqueue(common.NewQualifiedName(o))
-	}
-
-	if _, err := c.overridePolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(persistOpWorkerTrigger)); err != nil {
+	if _, err := c.overridePolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges(
+		common.NewQualifiedName,
+		c.persistOpWorker.Enqueue,
+	)); err != nil {
 		return nil, err
 	}
 
-	if _, err := c.clusterOverridePolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(persistOpWorkerTrigger)); err != nil {
+	if _, err := c.clusterOverridePolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges(
+		common.NewQualifiedName,
+		c.persistOpWorker.Enqueue,
+	)); err != nil {
 		return nil, err
 	}
 
@@ -223,7 +227,12 @@ func (c *Controller) reconcileCount(ctx context.Context, qualifiedName common.Qu
 			Info("Policyrc count controller finished reconciling")
 	}()
 
-	fedObj, err := fedobjectadapters.GetFromLister(c.fedObjectInformer.Lister(), c.clusterFedObjectInformer.Lister(), qualifiedName.Namespace, qualifiedName.Name)
+	fedObj, err := fedobjectadapters.GetFromLister(
+		c.fedObjectInformer.Lister(),
+		c.clusterFedObjectInformer.Lister(),
+		qualifiedName.Namespace,
+		qualifiedName.Name,
+	)
 	if err != nil && !apierrors.IsNotFound(err) {
 		logger.Error(err, "Failed to get federated object")
 		return worker.StatusError
@@ -270,7 +279,9 @@ func (c *Controller) reconcilePersist(
 	startTime := time.Now()
 	defer func() {
 		c.metrics.Duration(fmt.Sprintf("policyrc-persist-%s-controller.latency", metricName), startTime)
-		logger.V(3).WithValues("duration", time.Since(startTime)).Info("Policyrc persist controller finished reconciling")
+		logger.V(3).
+			WithValues("duration", time.Since(startTime)).
+			Info("Policyrc persist controller finished reconciling")
 	}()
 
 	store := clusterScopeStore
diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go
index 688b98f1..15959145 100644
--- a/pkg/controllers/status/controller.go
+++ b/pkg/controllers/status/controller.go
@@ -38,7 +38,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
-	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
@@ -153,9 +152,12 @@ func NewStatusController(
 	// Build queue for triggering cluster reconciliations.
s.clusterQueue = workqueue.NewNamedDelayingQueue("status-controller-cluster-queue") - fedObjectHandler := util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { - s.enqueueEnableCollectedStatusObject(common.NewQualifiedName(o), 0) - }) + fedObjectHandler := eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + func(key common.QualifiedName) { + s.enqueueEnableCollectedStatusObject(key, 0) + }, + ) if _, err := s.fedObjectInformer.Informer().AddEventHandler(fedObjectHandler); err != nil { return nil, err @@ -165,15 +167,17 @@ func NewStatusController( return nil, err } - if _, err := s.collectedStatusInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { - s.worker.Enqueue(common.NewQualifiedName(o)) - })); err != nil { + if _, err := s.collectedStatusInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + s.worker.Enqueue, + )); err != nil { return nil, err } - if _, err := s.clusterCollectedStatusInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { - s.worker.Enqueue(common.NewQualifiedName(o)) - })); err != nil { + if _, err := s.clusterCollectedStatusInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + s.worker.Enqueue, + )); err != nil { return nil, err } @@ -189,20 +193,23 @@ func NewStatusController( return nil } - return eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { - obj := o.(*unstructured.Unstructured) - - ftc, exists := s.ftcManager.GetResourceFTC(obj.GroupVersionKind()) - if !exists { - return - } + return eventhandlers.NewTriggerOnAllChanges( + func(uns *unstructured.Unstructured) *unstructured.Unstructured { + return uns + }, + func(uns *unstructured.Unstructured) { + ftc, exists := s.ftcManager.GetResourceFTC(uns.GroupVersionKind()) + if !exists { + return + } - federatedName := common.QualifiedName{ - Namespace: obj.GetNamespace(), - Name: naming.GenerateFederatedObjectName(obj.GetName(), ftc.GetName()), - } - s.worker.EnqueueWithDelay(federatedName, s.memberObjectEnqueueDelay) - }) + federatedName := common.QualifiedName{ + Namespace: uns.GetNamespace(), + Name: naming.GenerateFederatedObjectName(uns.GetName(), ftc.GetName()), + } + s.worker.EnqueueWithDelay(federatedName, s.memberObjectEnqueueDelay) + }, + ) }, }); err != nil { return nil, fmt.Errorf("failed to add event handler generator: %w", err) @@ -298,7 +305,10 @@ func (s *StatusController) reconcileOnClusterChange() { } } -func (s *StatusController) reconcile(ctx context.Context, qualifiedName common.QualifiedName) (reconcileStatus worker.Result) { +func (s *StatusController) reconcile( + ctx context.Context, + qualifiedName common.QualifiedName, +) (reconcileStatus worker.Result) { keyedLogger := s.logger.WithValues("federated-name", qualifiedName.String()) ctx = klog.NewContext(ctx, keyedLogger) @@ -327,7 +337,13 @@ func (s *StatusController) reconcile(ctx context.Context, qualifiedName common.Q if fedObject == nil || fedObject.GetDeletionTimestamp() != nil { keyedLogger.V(1).Info("No federated type found, deleting status object") - err = collectedstatusadapters.Delete(ctx, s.fedClient.CoreV1alpha1(), qualifiedName.Namespace, qualifiedName.Name, metav1.DeleteOptions{}) + err = collectedstatusadapters.Delete( + ctx, + s.fedClient.CoreV1alpha1(), + qualifiedName.Namespace, + qualifiedName.Name, + metav1.DeleteOptions{}, + ) if err != nil && !apierrors.IsNotFound(err) { return worker.StatusError } @@ -379,7 
+395,13 @@ func (s *StatusController) reconcile(ctx context.Context, qualifiedName common.Q var rsDigestsAnnotation string if targetIsDeployment { - latestReplicasetDigests, err := s.latestReplicasetDigests(ctx, clusterNames, templateQualifiedName, templateGVK, typeConfig) + latestReplicasetDigests, err := s.latestReplicasetDigests( + ctx, + clusterNames, + templateQualifiedName, + templateGVK, + typeConfig, + ) if err != nil { keyedLogger.Error(err, "Failed to get latest replicaset digests") } else { @@ -426,7 +448,12 @@ func (s *StatusController) reconcile(ctx context.Context, qualifiedName common.Q if existingStatus == nil { collectedStatus.GetLastUpdateTime().Time = time.Now() - _, err = collectedstatusadapters.Create(ctx, s.fedClient.CoreV1alpha1(), collectedStatus, metav1.CreateOptions{}) + _, err = collectedstatusadapters.Create( + ctx, + s.fedClient.CoreV1alpha1(), + collectedStatus, + metav1.CreateOptions{}, + ) if err != nil { if apierrors.IsAlreadyExists(err) { return worker.StatusConflict @@ -594,8 +621,14 @@ func (s *StatusController) clusterStatuses( sort.Slice(failedFields, func(i, j int) bool { return failedFields[i] < failedFields[j] }) - resourceClusterStatus.Error = fmt.Sprintf("Failed to get those fields: %s", strings.Join(failedFields, ", ")) - errList = append(errList, fmt.Sprintf("cluster-name: %s, error-info: %s", clusterName, resourceClusterStatus.Error)) + resourceClusterStatus.Error = fmt.Sprintf( + "Failed to get those fields: %s", + strings.Join(failedFields, ", "), + ) + errList = append( + errList, + fmt.Sprintf("cluster-name: %s, error-info: %s", clusterName, resourceClusterStatus.Error), + ) } clusterStatus = append(clusterStatus, resourceClusterStatus) } @@ -698,7 +731,9 @@ func (s *StatusController) realUpdatedReplicas( keyedLogger.WithValues("cluster-name", clusterName).Error(err, "Failed to get latestreplicaset digest") continue } - keyedLogger.WithValues("cluster-name", clusterName, "replicas-digest", digest).V(4).Info("Got latestreplicaset digest") + keyedLogger.WithValues("cluster-name", clusterName, "replicas-digest", digest). + V(4). + Info("Got latestreplicaset digest") if digest.CurrentRevision != revision { continue } diff --git a/pkg/util/eventhandlers/eventhandler.go b/pkg/util/eventhandlers/eventhandler.go index a5dbd3ae..1e2cdf5a 100644 --- a/pkg/util/eventhandlers/eventhandler.go +++ b/pkg/util/eventhandlers/eventhandler.go @@ -19,13 +19,15 @@ package eventhandlers import ( "reflect" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" ) -// NewTriggerOnAllChanges returns a cache.ResourceEventHandlerFuncs that will call the given function on all object -// changes. The given function will also be called on receiving cache.DeletedFinalStateUnknown deletion events. -func NewTriggerOnAllChanges(triggerFunc func(runtime.Object)) *cache.ResourceEventHandlerFuncs { +// NewTriggerOnAllChanges returns a cache.ResourceEventHandlerFuncs that will call the given triggerFunc on all object +// changes. The object is first transformed with the given keyFunc. triggerFunc is also called for add or delete events. 
+func NewTriggerOnAllChanges[Source any, Key any]( + keyFunc func(Source) Key, + triggerFunc func(Key), +) *cache.ResourceEventHandlerFuncs { return &cache.ResourceEventHandlerFuncs{ DeleteFunc: func(old interface{}) { if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok { @@ -34,17 +36,51 @@ func NewTriggerOnAllChanges(triggerFunc func(runtime.Object)) *cache.ResourceEve return } } - oldObj := old.(runtime.Object) - triggerFunc(oldObj) + oldSource := old.(Source) + triggerFunc(keyFunc(oldSource)) }, AddFunc: func(cur interface{}) { - curObj := cur.(runtime.Object) - triggerFunc(curObj) + curObj := cur.(Source) + triggerFunc(keyFunc(curObj)) }, UpdateFunc: func(old, cur interface{}) { if !reflect.DeepEqual(old, cur) { - curObj := cur.(runtime.Object) - triggerFunc(curObj) + curObj := cur.(Source) + triggerFunc(keyFunc(curObj)) + } + }, + } +} + +// NewTriggerOnChanges returns a cache.ResourceEventHandlerFuncs that will call the given triggerFunc on object changes +// that passes the given predicate. The object is first transformed with the given keyFunc. triggerFunc is also called +// for add and delete events. +func NewTriggerOnChanges[Source any, Key any]( + predicate func(old, cur Source) bool, + keyFunc func(Source) Key, + triggerFunc func(Key), +) *cache.ResourceEventHandlerFuncs { + return &cache.ResourceEventHandlerFuncs{ + DeleteFunc: func(old interface{}) { + if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok { + // This object might be stale but ok for our current usage. + old = deleted.Obj + if old == nil { + return + } + } + oldObj := old.(Source) + triggerFunc(keyFunc(oldObj)) + }, + AddFunc: func(cur interface{}) { + curObj := cur.(Source) + triggerFunc(keyFunc(curObj)) + }, + UpdateFunc: func(old, cur interface{}) { + oldObj := old.(Source) + curObj := cur.(Source) + if predicate(oldObj, curObj) { + triggerFunc(keyFunc(curObj)) } }, } From f87a860550a037d5c27598ffa2d551b337ae9eb0 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Tue, 25 Jul 2023 11:54:04 +0800 Subject: [PATCH 092/173] refactor(cluster-controller): bootstrap controller-manager with cluster controller --- .../app/controllermanager.go | 3 +-- cmd/controller-manager/app/core.go | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index 2300a2b1..889dc68c 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -37,7 +37,6 @@ import ( const ( FederatedClusterControllerName = "cluster" FederateControllerName = "federate" - MonitorControllerName = "monitor" FollowerControllerName = "follower" PolicyRCControllerName = "policyrc" OverrideControllerName = "overridepolicy" @@ -53,7 +52,7 @@ var knownControllers = map[string]controllermanager.StartControllerFunc{ StatusControllerName: startStatusController, } -var controllersDisabledByDefault = sets.New(MonitorControllerName) +var controllersDisabledByDefault = sets.New[string]() // Run starts the controller manager according to the given options. 
func Run(ctx context.Context, opts *options.Options) { diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index ccc98b00..a0ee4513 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -29,6 +29,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" "github.com/kubewharf/kubeadmiral/pkg/controllers/status" + "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" ) func startFederateController( @@ -152,3 +153,27 @@ func startStatusController( return statusController, nil } + +func startFederatedClusterController( + ctx context.Context, + controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { + federatedClusterController, err := federatedcluster.NewFederatedClusterController( + controllerCtx.KubeClientset, + controllerCtx.FedClientset, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), + controllerCtx.FederatedInformerManager, + controllerCtx.Metrics, + klog.Background(), + controllerCtx.ComponentConfig.ClusterJoinTimeout, + controllerCtx.WorkerCount, + controllerCtx.FedSystemNamespace, + ) + if err != nil { + return nil, fmt.Errorf("error creating federated cluster controller: %w", err) + } + + go federatedClusterController.Run(ctx) + + return federatedClusterController, nil +} From a54768f602c9b0ed06282a7536c993947ca057c6 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Sat, 22 Jul 2023 19:50:05 +0800 Subject: [PATCH 093/173] refactor(scheduler): adopt unified types --- .../app/controllermanager.go | 3 + cmd/controller-manager/app/core.go | 30 + .../v1alpha1/extensions_federatedobject.go | 79 ++- pkg/controllers/common/constants.go | 3 + pkg/controllers/federate/util.go | 4 +- pkg/controllers/override/util.go | 2 +- pkg/controllers/scheduler/constants.go | 3 - .../scheduler/core/generic_scheduler.go | 8 +- .../extensions/webhook/v1alpha1/adapter.go | 5 +- .../extensions/webhook/v1alpha1/plugin.go | 1 - .../scheduler/framework/interface.go | 1 - .../plugins/apiresources/apiresources.go | 1 - .../clusteraffinity/cluster_affinity.go | 3 +- .../clusterresources/balanced_allocation.go | 1 - .../framework/plugins/clusterresources/fit.go | 1 - .../clusterresources/least_allocated.go | 1 - .../clusterresources/most_allocated.go | 1 - .../plugins/maxcluster/max_cluster.go | 1 - .../framework/plugins/placement/filter.go | 1 - .../scheduler/framework/plugins/rsp/rsp.go | 3 +- .../tainttoleration/taint_toleration.go | 1 - .../scheduler/framework/runtime/framework.go | 1 - .../scheduler/framework/runtime/registry.go | 1 - pkg/controllers/scheduler/framework/types.go | 1 - pkg/controllers/scheduler/framework/util.go | 1 - pkg/controllers/scheduler/handle.go | 1 - .../{util => scheduler}/planner/planner.go | 1 - .../planner/planner_test.go | 0 pkg/controllers/scheduler/profile.go | 1 - pkg/controllers/scheduler/scheduler.go | 574 +++++++++++------- .../scheduler/schedulingtriggers.go | 88 +-- pkg/controllers/scheduler/schedulingunit.go | 117 ++-- pkg/controllers/scheduler/util.go | 169 +----- pkg/controllers/scheduler/util_test.go | 2 +- pkg/controllers/scheduler/webhook.go | 1 - .../util/clusterselector/util.go | 0 pkg/util/eventhandlers/eventhandler.go | 35 ++ pkg/util/fedobjectadapters/adapters.go | 5 +- .../util/unstructured/unstructured.go | 3 +- 39 files changed, 612 insertions(+), 542 deletions(-) rename pkg/controllers/{util => scheduler}/planner/planner.go (99%) rename pkg/controllers/{util
=> scheduler}/planner/planner_test.go (100%) rename pkg/{controllers => }/util/clusterselector/util.go (100%) rename pkg/{controllers => }/util/unstructured/unstructured.go (98%) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index 889dc68c..a23c4fbc 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -42,6 +42,7 @@ const ( OverrideControllerName = "overridepolicy" NamespaceAutoPropagationControllerName = "nsautoprop" StatusControllerName = "status" + SchedulerName = "scheduler" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ @@ -50,6 +51,8 @@ var knownControllers = map[string]controllermanager.StartControllerFunc{ OverrideControllerName: startOverridePolicyController, NamespaceAutoPropagationControllerName: startNamespaceAutoPropagationController, StatusControllerName: startStatusController, + FederatedClusterControllerName: startFederatedClusterController, + SchedulerName: startScheduler, } var controllersDisabledByDefault = sets.New[string]() diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index a0ee4513..442bc45c 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -30,6 +30,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" "github.com/kubewharf/kubeadmiral/pkg/controllers/status" "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" + "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" ) func startFederateController( @@ -177,3 +178,32 @@ func startFederatedClusterController( return federatedClusterController, nil } + +func startScheduler( + ctx context.Context, + controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { + scheduler, err := scheduler.NewScheduler( + controllerCtx.KubeClientset, + controllerCtx.FedClientset, + controllerCtx.DynamicClientset, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedObjects(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + controllerCtx.FedInformerFactory.Core().V1alpha1().PropagationPolicies(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterPropagationPolicies(), + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedClusters(), + controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulingProfiles(), + controllerCtx.InformerManager, + controllerCtx.FedInformerFactory.Core().V1alpha1().SchedulerPluginWebhookConfigurations(), + controllerCtx.Metrics, + klog.Background(), + controllerCtx.WorkerCount, + ) + if err != nil { + return nil, fmt.Errorf("error creating scheduler: %w", err) + } + + go scheduler.Run(ctx) + + return scheduler, nil +} diff --git a/pkg/apis/core/v1alpha1/extensions_federatedobject.go b/pkg/apis/core/v1alpha1/extensions_federatedobject.go index 92d4df7a..f7ec115a 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedobject.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedobject.go @@ -81,8 +81,9 @@ func (spec *GenericFederatedObjectSpec) GetControllerPlacement(controller string return nil } -// SetControllerPlacement sets the ClusterPlacements for a given controller. If clusterNames is nil or empty, the previous -// placement for the given controller will be deleted. Returns a bool indicating if the GenericFederatedObject has changed. +// SetControllerPlacement sets the cluster placements for a given controller. 
If clusterNames is nil or empty, the +// previous placement for the given controller will be deleted. Returns a bool indicating if the GenericFederatedObject +// has changed. func (spec *GenericFederatedObjectSpec) SetControllerPlacement(controller string, clusterNames []string) bool { if len(clusterNames) == 0 { return spec.DeleteControllerPlacement(controller) @@ -140,6 +141,79 @@ func (spec *GenericFederatedObjectSpec) DeleteControllerPlacement(controller str return true } +// Overrides extensions + +func (spec *GenericFederatedObjectSpec) GetControllerOverrides(controller string) []ClusterReferenceWithPatches { + for _, overrides := range spec.Overrides { + if overrides.Controller == controller { + return overrides.Override + } + } + return nil +} + +// SetControllerOverrides sets the cluster overrides for a given controller. If clusterNames is nil or empty, the +// previous overrides for the given controller will be deleted. Returns a bool indicating if the GenericFederatedObject +// has changed. +func (spec *GenericFederatedObjectSpec) SetControllerOverrides( + controller string, + clusterOverrides []ClusterReferenceWithPatches, +) bool { + if len(clusterOverrides) == 0 { + return spec.DeleteControllerOverrides(controller) + } + + // sort the clusters by name for readability and to avoid unnecessary updates + sort.Slice(clusterOverrides, func(i, j int) bool { + return clusterOverrides[i].Cluster < clusterOverrides[j].Cluster + }) + + oldOverridesWithControllerIdx := -1 + for i := range spec.Overrides { + if spec.Overrides[i].Controller == controller { + oldOverridesWithControllerIdx = i + break + } + } + + newOverridesWithController := OverrideWithController{ + Controller: controller, + Override: clusterOverrides, + } + if oldOverridesWithControllerIdx == -1 { + spec.Overrides = append(spec.Overrides, newOverridesWithController) + return true + } + if !reflect.DeepEqual(newOverridesWithController, spec.Overrides[oldOverridesWithControllerIdx]) { + spec.Overrides[oldOverridesWithControllerIdx] = newOverridesWithController + return true + } + + return false +} + +// DeleteControllerOverrides deletes a controller's overrides, returning a bool to indicate if the +// GenericFederatedObject has changed. +func (spec *GenericFederatedObjectSpec) DeleteControllerOverrides(controller string) bool { + oldOverridesIdx := -1 + for i := range spec.Overrides { + if spec.Overrides[i].Controller == controller { + oldOverridesIdx = i + break + } + } + + if oldOverridesIdx == -1 { + return false + } + + spec.Overrides = append(spec.Overrides[:oldOverridesIdx], spec.Overrides[(oldOverridesIdx+1):]...) + return true +} + +// Template extensions + +// GetTemplateAsUnstructured returns the FederatedObject's template unmarshalled into an *unstructured.Unstructured. func (spec *GenericFederatedObjectSpec) GetTemplateAsUnstructured() (*unstructured.Unstructured, error) { template := &unstructured.Unstructured{} if err := template.UnmarshalJSON(spec.Template.Raw); err != nil { @@ -148,6 +222,7 @@ func (spec *GenericFederatedObjectSpec) GetTemplateAsUnstructured() (*unstructur return template, nil } +// GetTemplateGVK returns the GVK of the FederatedObject's source object by parsing the FederatedObject's template. 
func (spec *GenericFederatedObjectSpec) GetTemplateGVK() (schema.GroupVersionKind, error) { type partialTypeMetadata struct { metav1.TypeMeta `json:",inline"` diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index 810817e5..41d9c783 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -140,6 +140,9 @@ const ( // TemplateGeneratorMergePatchAnnotation indicates the merge patch document capable of converting // the source object to the template object. TemplateGeneratorMergePatchAnnotation = FederateControllerPrefix + "template-generator-merge-patch" + + PropagationPolicyNameLabel = DefaultPrefix + "propagation-policy-name" + ClusterPropagationPolicyNameLabel = DefaultPrefix + "cluster-propagation-policy-name" ) // PropagatedAnnotationKeys and PropagatedLabelKeys are used to store the keys of annotations and labels that are present diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 96802afd..104831de 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -270,8 +270,8 @@ var ( ) federatedLabelSet = sets.New[string]( - scheduler.PropagationPolicyNameLabel, - scheduler.ClusterPropagationPolicyNameLabel, + common.PropagationPolicyNameLabel, + common.ClusterPropagationPolicyNameLabel, override.OverridePolicyNameLabel, override.ClusterOverridePolicyNameLabel, ) diff --git a/pkg/controllers/override/util.go b/pkg/controllers/override/util.go index cea0b9f7..3652f153 100644 --- a/pkg/controllers/override/util.go +++ b/pkg/controllers/override/util.go @@ -27,7 +27,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/clusterselector" + "github.com/kubewharf/kubeadmiral/pkg/util/clusterselector" ) /* diff --git a/pkg/controllers/scheduler/constants.go b/pkg/controllers/scheduler/constants.go index 6bd4a6ce..460ede03 100644 --- a/pkg/controllers/scheduler/constants.go +++ b/pkg/controllers/scheduler/constants.go @@ -25,9 +25,6 @@ const ( GlobalSchedulerName = "global-scheduler" PrefixedGlobalSchedulerName = common.DefaultPrefix + "global-scheduler" - PropagationPolicyNameLabel = common.DefaultPrefix + "propagation-policy-name" - ClusterPropagationPolicyNameLabel = common.DefaultPrefix + "cluster-propagation-policy-name" - // Marks that the annotated object must follow the placement of the followed object. // Value is in the form G/V/R/ns/name, e.g. `types.kubeadmiral.io/v1alpha1/federateddeployments/default/fed-dp-xxx`. FollowsObjectAnnotation = common.DefaultPrefix + "follows-object" diff --git a/pkg/controllers/scheduler/core/generic_scheduler.go b/pkg/controllers/scheduler/core/generic_scheduler.go index dfd04a0b..55af8d8a 100644 --- a/pkg/controllers/scheduler/core/generic_scheduler.go +++ b/pkg/controllers/scheduler/core/generic_scheduler.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2014 The Kubernetes Authors. 
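A minimal sketch of how a controller is expected to use the GenericFederatedObjectSpec placement and override extensions introduced above (the controller name and cluster names are placeholders, not taken from the patch):

package example

import (
	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
)

// recordDecision writes this controller's placement and clears its overrides; both
// methods report whether the object changed, so callers can skip no-op API updates.
func recordDecision(spec *fedcorev1a1.GenericFederatedObjectSpec) bool {
	changed := spec.SetControllerPlacement("example.io/my-controller", []string{"cluster-1", "cluster-2"})

	// Passing nil (or an empty slice) deletes any overrides previously recorded by this
	// controller, mirroring the nil/empty semantics of SetControllerPlacement.
	changed = spec.SetControllerOverrides("example.io/my-controller", nil) || changed

	return changed
}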
@@ -28,6 +27,7 @@ import ( "strings" "github.com/davecgh/go-spew/spew" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" "k8s.io/utils/pointer" @@ -53,10 +53,10 @@ type ScheduleResult struct { SuggestedClusters map[string]*int64 } -func (result ScheduleResult) ClusterSet() map[string]struct{} { - clusterSet := make(map[string]struct{}, len(result.SuggestedClusters)) +func (result ScheduleResult) ClusterSet() sets.Set[string] { + clusterSet := sets.New[string]() for cluster := range result.SuggestedClusters { - clusterSet[cluster] = struct{}{} + clusterSet.Insert(cluster) } return clusterSet } diff --git a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go index 9b964747..f19e7b37 100644 --- a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go +++ b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/adapter.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -39,7 +38,7 @@ func ConvertSchedulingUnit(su *framework.SchedulingUnit) *schedwebhookv1a1.Sched } } - placements := []fedcorev1a1.ClusterReference{} + placements := []fedcorev1a1.DesiredPlacement{} for cluster := range su.ClusterNames { var weight *int64 if w, ok := su.Weights[cluster]; ok { @@ -51,7 +50,7 @@ func ConvertSchedulingUnit(su *framework.SchedulingUnit) *schedwebhookv1a1.Sched maxReplicas = &max } - placement := fedcorev1a1.ClusterReference{ + placement := fedcorev1a1.DesiredPlacement{ Cluster: cluster, Preferences: fedcorev1a1.Preferences{ MinReplicas: su.MinReplicas[cluster], diff --git a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/plugin.go b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/plugin.go index 78b78979..ea1da454 100644 --- a/pkg/controllers/scheduler/extensions/webhook/v1alpha1/plugin.go +++ b/pkg/controllers/scheduler/extensions/webhook/v1alpha1/plugin.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/framework/interface.go b/pkg/controllers/scheduler/framework/interface.go index 3052b1cc..50165e89 100644 --- a/pkg/controllers/scheduler/framework/interface.go +++ b/pkg/controllers/scheduler/framework/interface.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/apiresources/apiresources.go b/pkg/controllers/scheduler/framework/plugins/apiresources/apiresources.go index a949e86f..71f23d1c 100644 --- a/pkg/controllers/scheduler/framework/plugins/apiresources/apiresources.go +++ b/pkg/controllers/scheduler/framework/plugins/apiresources/apiresources.go @@ -1,4 +1,3 @@ -//go:build exclude // The design of this plugin is heavily inspired by karmada-scheduler. Kudos! package apiresources diff --git a/pkg/controllers/scheduler/framework/plugins/clusteraffinity/cluster_affinity.go b/pkg/controllers/scheduler/framework/plugins/clusteraffinity/cluster_affinity.go index e034f79c..2e1914d5 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusteraffinity/cluster_affinity.go +++ b/pkg/controllers/scheduler/framework/plugins/clusteraffinity/cluster_affinity.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. 
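The ClusterSet change above swaps a bare map[string]struct{} for sets.Set[string], giving callers membership tests and sorted listings without hand-rolled loops. A small sketch of what that enables (the ScheduleResult literal is made up for illustration):

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"

	"github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/core"
)

func demo() {
	result := core.ScheduleResult{
		SuggestedClusters: map[string]*int64{"cluster-2": nil, "cluster-1": nil},
	}
	clusterSet := result.ClusterSet()

	// sets.List returns members in sorted order, keeping derived placements deterministic.
	fmt.Println(sets.List(clusterSet))       // [cluster-1 cluster-2]
	fmt.Println(clusterSet.Has("cluster-1")) // true
}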
@@ -29,7 +28,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/framework" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/framework/plugins/names" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/clusterselector" + "github.com/kubewharf/kubeadmiral/pkg/util/clusterselector" ) const ( diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/balanced_allocation.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/balanced_allocation.go index a99d2a6d..46e41d80 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/balanced_allocation.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/balanced_allocation.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go index 114926a6..0310205b 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/least_allocated.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/least_allocated.go index 38d365bf..1eca9f16 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/least_allocated.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/least_allocated.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/most_allocated.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/most_allocated.go index 6f5b830a..586455ab 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/most_allocated.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/most_allocated.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/maxcluster/max_cluster.go b/pkg/controllers/scheduler/framework/plugins/maxcluster/max_cluster.go index 08ca56f1..7a1691de 100644 --- a/pkg/controllers/scheduler/framework/plugins/maxcluster/max_cluster.go +++ b/pkg/controllers/scheduler/framework/plugins/maxcluster/max_cluster.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/placement/filter.go b/pkg/controllers/scheduler/framework/plugins/placement/filter.go index 19c3253b..35748d7d 100644 --- a/pkg/controllers/scheduler/framework/plugins/placement/filter.go +++ b/pkg/controllers/scheduler/framework/plugins/placement/filter.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/scheduler/framework/plugins/rsp/rsp.go b/pkg/controllers/scheduler/framework/plugins/rsp/rsp.go index 50b94495..129021bd 100644 --- a/pkg/controllers/scheduler/framework/plugins/rsp/rsp.go +++ b/pkg/controllers/scheduler/framework/plugins/rsp/rsp.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
@@ -36,7 +35,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/framework" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/framework/plugins/names" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/planner" + "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/planner" ) const ( diff --git a/pkg/controllers/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/pkg/controllers/scheduler/framework/plugins/tainttoleration/taint_toleration.go index 80571411..dae64acf 100644 --- a/pkg/controllers/scheduler/framework/plugins/tainttoleration/taint_toleration.go +++ b/pkg/controllers/scheduler/framework/plugins/tainttoleration/taint_toleration.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/runtime/framework.go b/pkg/controllers/scheduler/framework/runtime/framework.go index 3cf2bee7..8da7e857 100644 --- a/pkg/controllers/scheduler/framework/runtime/framework.go +++ b/pkg/controllers/scheduler/framework/runtime/framework.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/runtime/registry.go b/pkg/controllers/scheduler/framework/runtime/registry.go index 278ed0d2..b8970bed 100644 --- a/pkg/controllers/scheduler/framework/runtime/registry.go +++ b/pkg/controllers/scheduler/framework/runtime/registry.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/types.go b/pkg/controllers/scheduler/framework/types.go index b8b8a212..3b1abbde 100644 --- a/pkg/controllers/scheduler/framework/types.go +++ b/pkg/controllers/scheduler/framework/types.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/framework/util.go b/pkg/controllers/scheduler/framework/util.go index 165e73a0..a12dedd9 100644 --- a/pkg/controllers/scheduler/framework/util.go +++ b/pkg/controllers/scheduler/framework/util.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2015 The Kubernetes Authors. diff --git a/pkg/controllers/scheduler/handle.go b/pkg/controllers/scheduler/handle.go index 84589766..6b99558e 100644 --- a/pkg/controllers/scheduler/handle.go +++ b/pkg/controllers/scheduler/handle.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. diff --git a/pkg/controllers/util/planner/planner.go b/pkg/controllers/scheduler/planner/planner.go similarity index 99% rename from pkg/controllers/util/planner/planner.go rename to pkg/controllers/scheduler/planner/planner.go index d4034bcd..46f91c3f 100644 --- a/pkg/controllers/util/planner/planner.go +++ b/pkg/controllers/scheduler/planner/planner.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2016 The Kubernetes Authors. diff --git a/pkg/controllers/util/planner/planner_test.go b/pkg/controllers/scheduler/planner/planner_test.go similarity index 100% rename from pkg/controllers/util/planner/planner_test.go rename to pkg/controllers/scheduler/planner/planner_test.go diff --git a/pkg/controllers/scheduler/profile.go b/pkg/controllers/scheduler/profile.go index 811d9325..3c6f8c44 100644 --- a/pkg/controllers/scheduler/profile.go +++ b/pkg/controllers/scheduler/profile.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index b62cf5a2..0c4a2645 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -30,12 +29,10 @@ import ( "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" - pkgruntime "k8s.io/apimachinery/pkg/runtime" - dynamicclient "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -44,17 +41,22 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" - fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/core" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/annotation" + clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" +) + +const ( + SchedulerName = "scheduler" ) type ClusterWeight struct { @@ -63,31 +65,22 @@ type ClusterWeight struct { } type Scheduler struct { - typeConfig *fedcorev1a1.FederatedTypeConfig - name string - fedClient fedclient.Interface - dynamicClient dynamicclient.Interface - - federatedObjectClient dynamicclient.NamespaceableResourceInterface - federatedObjectLister cache.GenericLister - federatedObjectSynced cache.InformerSynced + dynamicClient dynamic.Interface - propagationPolicyLister fedcorev1a1listers.PropagationPolicyLister - clusterPropagationPolicyLister fedcorev1a1listers.ClusterPropagationPolicyLister - propagationPolicySynced cache.InformerSynced - clusterPropagationPolicySynced cache.InformerSynced + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer + propagationPolicyInformer fedcorev1a1informers.PropagationPolicyInformer + clusterPropagationPolicyInformer fedcorev1a1informers.ClusterPropagationPolicyInformer + federatedClusterInformer 
fedcorev1a1informers.FederatedClusterInformer + schedulingProfileInformer fedcorev1a1informers.SchedulingProfileInformer - clusterLister fedcorev1a1listers.FederatedClusterLister - clusterSynced cache.InformerSynced + informerManager informermanager.InformerManager - schedulingProfileLister fedcorev1a1listers.SchedulingProfileLister - schedulingProfileSynced cache.InformerSynced - - webhookConfigurationSynced cache.InformerSynced webhookPlugins sync.Map + webhookConfigurationSynced cache.InformerSynced - worker worker.ReconcileWorker + worker worker.ReconcileWorker[common.QualifiedName] eventRecorder record.EventRecorder algorithm core.ScheduleAlgorithm @@ -101,84 +94,76 @@ func (s *Scheduler) IsControllerReady() bool { } func NewScheduler( - logger klog.Logger, - typeConfig *fedcorev1a1.FederatedTypeConfig, - kubeClient kubeclient.Interface, + kubeClient kubernetes.Interface, fedClient fedclient.Interface, - dynamicClient dynamicclient.Interface, - federatedObjectInformer informers.GenericInformer, + dynamicClient dynamic.Interface, + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, propagationPolicyInformer fedcorev1a1informers.PropagationPolicyInformer, clusterPropagationPolicyInformer fedcorev1a1informers.ClusterPropagationPolicyInformer, - clusterInformer fedcorev1a1informers.FederatedClusterInformer, + federatedClusterInformer fedcorev1a1informers.FederatedClusterInformer, schedulingProfileInformer fedcorev1a1informers.SchedulingProfileInformer, + informerManager informermanager.InformerManager, webhookConfigurationInformer fedcorev1a1informers.SchedulerPluginWebhookConfigurationInformer, metrics stats.Metrics, + logger klog.Logger, workerCount int, ) (*Scheduler, error) { - schedulerName := fmt.Sprintf("%s-scheduler", typeConfig.GetFederatedType().Name) - s := &Scheduler{ - typeConfig: typeConfig, - name: schedulerName, - fedClient: fedClient, - dynamicClient: dynamicClient, - metrics: metrics, - logger: logger.WithValues("controller", GlobalSchedulerName, "ftc", typeConfig.Name), - } - - s.worker = worker.NewReconcileWorker( + fedClient: fedClient, + dynamicClient: dynamicClient, + fedObjectInformer: fedObjectInformer, + clusterFedObjectInformer: clusterFedObjectInformer, + propagationPolicyInformer: propagationPolicyInformer, + clusterPropagationPolicyInformer: clusterPropagationPolicyInformer, + federatedClusterInformer: federatedClusterInformer, + schedulingProfileInformer: schedulingProfileInformer, + informerManager: informerManager, + webhookConfigurationSynced: webhookConfigurationInformer.Informer().HasSynced, + webhookPlugins: sync.Map{}, + metrics: metrics, + logger: logger.WithValues("controller", SchedulerName), + } + + s.eventRecorder = eventsink.NewDefederatingRecorderMux(kubeClient, SchedulerName, 6) + s.worker = worker.NewReconcileWorker[common.QualifiedName]( + SchedulerName, + nil, s.reconcile, worker.RateLimiterOptions{}, workerCount, metrics, - delayingdeliver.NewMetricTags("scheduler-worker", s.typeConfig.GetFederatedType().Kind), ) - s.eventRecorder = eventsink.NewDefederatingRecorderMux(kubeClient, s.name, 6) - - apiResource := typeConfig.GetFederatedType() - s.federatedObjectClient = dynamicClient.Resource(schemautil.APIResourceToGVR(&apiResource)) - - s.federatedObjectLister = federatedObjectInformer.Lister() - s.federatedObjectSynced = federatedObjectInformer.Informer().HasSynced - 
federatedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(s.worker.EnqueueObject)) - - // only required if namespaced - if s.typeConfig.GetNamespaced() { - s.propagationPolicyLister = propagationPolicyInformer.Lister() - s.propagationPolicySynced = propagationPolicyInformer.Informer().HasSynced - propagationPolicyInformer.Informer().AddEventHandler(util.NewTriggerOnGenerationChanges(s.enqueueFederatedObjectsForPolicy)) - } - s.clusterPropagationPolicyLister = clusterPropagationPolicyInformer.Lister() - s.clusterPropagationPolicySynced = clusterPropagationPolicyInformer.Informer().HasSynced - clusterPropagationPolicyInformer.Informer().AddEventHandler(util.NewTriggerOnGenerationChanges(s.enqueueFederatedObjectsForPolicy)) - - s.clusterLister = clusterInformer.Lister() - s.clusterSynced = clusterInformer.Informer().HasSynced - clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { s.enqueueFederatedObjectsForCluster(obj.(pkgruntime.Object)) }, - DeleteFunc: func(obj interface{}) { - if deleted, ok := obj.(cache.DeletedFinalStateUnknown); ok { - // This object might be stale but ok for our current usage. - obj = deleted.Obj - if obj == nil { - return - } - } - s.enqueueFederatedObjectsForCluster(obj.(pkgruntime.Object)) + fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + s.worker.Enqueue, + )) + clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + common.NewQualifiedName, + s.worker.Enqueue, + )) + + propagationPolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnGenerationChanges( + func(obj metav1.Object) metav1.Object { return obj }, + s.enqueueFederatedObjectsForPolicy, + )) + clusterPropagationPolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnGenerationChanges( + func(obj metav1.Object) metav1.Object { return obj }, + s.enqueueFederatedObjectsForPolicy, + )) + + federatedClusterInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnChanges( + func(oldCluster, curCluster *fedcorev1a1.FederatedCluster) bool { + return !equality.Semantic.DeepEqual(oldCluster.Labels, curCluster.Labels) || + !equality.Semantic.DeepEqual(oldCluster.Spec.Taints, curCluster.Spec.Taints) || + !equality.Semantic.DeepEqual(oldCluster.Status.APIResourceTypes, curCluster.Status.APIResourceTypes) }, - UpdateFunc: func(oldUntyped, newUntyped interface{}) { - oldCluster, newCluster := oldUntyped.(*fedcorev1a1.FederatedCluster), newUntyped.(*fedcorev1a1.FederatedCluster) - if !equality.Semantic.DeepEqual(oldCluster.Labels, newCluster.Labels) || - !equality.Semantic.DeepEqual(oldCluster.Spec.Taints, newCluster.Spec.Taints) || - !equality.Semantic.DeepEqual(oldCluster.Status.APIResourceTypes, newCluster.Status.APIResourceTypes) { - s.enqueueFederatedObjectsForCluster(newCluster) - } + func(cluster *fedcorev1a1.FederatedCluster) *fedcorev1a1.FederatedCluster { + return cluster }, - }) - - s.schedulingProfileLister = schedulingProfileInformer.Lister() - s.schedulingProfileSynced = schedulingProfileInformer.Informer().HasSynced + s.enqueueFederatedObjectsForCluster, + )) s.webhookConfigurationSynced = webhookConfigurationInformer.Informer().HasSynced webhookConfigurationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -206,6 +191,13 @@ func NewScheduler( }, }) + informerManager.AddFTCUpdateHandler(func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) { + if lastObserved == nil && 
latest != nil { + s.enqueueFederatedObjectsForFTC(latest) + return + } + }) + s.algorithm = core.NewSchedulerAlgorithm() return s, nil @@ -213,15 +205,15 @@ func NewScheduler( func (s *Scheduler) HasSynced() bool { cachesSynced := []cache.InformerSynced{ - s.federatedObjectSynced, - s.clusterPropagationPolicySynced, - s.clusterSynced, - s.schedulingProfileSynced, + s.fedObjectInformer.Informer().HasSynced, + s.clusterFedObjectInformer.Informer().HasSynced, + s.propagationPolicyInformer.Informer().HasSynced, + s.clusterPropagationPolicyInformer.Informer().HasSynced, + s.federatedClusterInformer.Informer().HasSynced, + s.informerManager.HasSynced, + s.schedulingProfileInformer.Informer().HasSynced, s.webhookConfigurationSynced, } - if s.typeConfig.GetNamespaced() { - cachesSynced = append(cachesSynced, s.propagationPolicySynced) - } for _, synced := range cachesSynced { if !synced() { @@ -233,61 +225,88 @@ func (s *Scheduler) HasSynced() bool { } func (s *Scheduler) Run(ctx context.Context) { - s.logger.Info("Starting controller") - defer s.logger.Info("Stopping controller") + ctx, logger := logging.InjectLogger(ctx, s.logger) - if !cache.WaitForNamedCacheSync(s.name, ctx.Done(), s.HasSynced) { + logger.Info("Starting controller") + defer logger.Info("Stopping controller") + + if !cache.WaitForNamedCacheSync(SchedulerName, ctx.Done(), s.HasSynced) { + logger.Error(nil, "Timed out waiting for cache sync") return } - s.worker.Run(ctx.Done()) + logger.Info("Caches are synced") + + s.worker.Run(ctx) <-ctx.Done() } -func (s *Scheduler) reconcile(qualifiedName common.QualifiedName) (status worker.Result) { +func (s *Scheduler) reconcile(ctx context.Context, key common.QualifiedName) (status worker.Result) { _ = s.metrics.Rate("scheduler.throughput", 1) - keyedLogger := s.logger.WithValues("origin", "reconcile", "object", qualifiedName.String()) - ctx := klog.NewContext(context.TODO(), keyedLogger) + ctx, logger := logging.InjectLoggerValues(ctx, "key", key.String()) + startTime := time.Now() - keyedLogger.V(3).Info("Start reconcile") + logger.V(3).Info("Start reconcile") defer func() { - s.metrics.Duration(fmt.Sprintf("%s.latency", s.name), startTime) - keyedLogger.V(3).WithValues("duration", time.Since(startTime), "status", status.String()).Info("Finished reconcile") + s.metrics.Duration(fmt.Sprintf("%s.latency", SchedulerName), startTime) + logger.V(3).WithValues("duration", time.Since(startTime), "status", status.String()).Info("Finished reconcile") }() - fedObject, err := s.federatedObjectFromStore(qualifiedName) + fedObject, err := fedobjectadapters.GetFromLister( + s.fedObjectInformer.Lister(), + s.clusterFedObjectInformer.Lister(), + key.Namespace, + key.Name, + ) if err != nil && !apierrors.IsNotFound(err) { - keyedLogger.Error(err, "Failed to get object from store") + logger.Error(err, "Failed to get FederatedObject from store") return worker.StatusError } if apierrors.IsNotFound(err) || fedObject.GetDeletionTimestamp() != nil { - keyedLogger.V(3).Info("Observed object deletion") + logger.V(3).Info("Observed FederatedObject deletion") return worker.StatusAllOK } - fedObject = fedObject.DeepCopy() + fedObject = fedObject.DeepCopyGenericFederatedObject() + + sourceGVK, err := fedObject.GetSpec().GetTemplateGVK() + if err != nil { + logger.Error(err, "Failed to get source GVK from FederatedObject") + return worker.StatusError + } + ctx, logger = logging.InjectLoggerValues(ctx, "source-gvk", sourceGVK) + + ftc, exists := s.informerManager.GetResourceFTC(sourceGVK) + if !exists { + 
logger.V(3).Info("FTC for FederatedObject source type does not exist, will skip scheduling") + return worker.StatusAllOK + } + ctx, logger = logging.InjectLoggerValues(ctx, "ftc", ftc.GetName()) - policy, clusters, schedulingProfile, earlyReturnResult := s.prepareToSchedule(ctx, fedObject) + policy, clusters, schedulingProfile, earlyReturnResult := s.prepareToSchedule(ctx, fedObject, ftc) if earlyReturnResult != nil { return *earlyReturnResult } if policy != nil { - keyedLogger = keyedLogger.WithValues("policy", common.NewQualifiedName(policy).String()) + ctx, logger = logging.InjectLoggerValues(ctx, "policy", common.NewQualifiedName(policy).String()) } if schedulingProfile != nil { - keyedLogger = keyedLogger.WithValues("schedulingProfile", common.NewQualifiedName(schedulingProfile).String()) + ctx, logger = logging.InjectLoggerValues( + ctx, + "schedulingProfile", + common.NewQualifiedName(schedulingProfile).String(), + ) } - ctx = klog.NewContext(ctx, keyedLogger) - result, earlyReturnWorkerResult := s.schedule(ctx, fedObject, policy, schedulingProfile, clusters) + result, earlyReturnWorkerResult := s.schedule(ctx, ftc, fedObject, policy, schedulingProfile, clusters) if earlyReturnWorkerResult != nil { return *earlyReturnWorkerResult } - keyedLogger = keyedLogger.WithValues("result", result.String()) - keyedLogger.V(2).Info("Scheduling result obtained") + ctx, logger = logging.InjectLoggerValues(ctx, "result", result.String()) + logger.V(2).Info("Scheduling result obtained") auxInfo := &auxiliarySchedulingInformation{ enableFollowerScheduling: false, @@ -297,49 +316,53 @@ func (s *Scheduler) reconcile(qualifiedName common.QualifiedName) (status worker spec := policy.GetSpec() auxInfo.enableFollowerScheduling = !spec.DisableFollowerScheduling - keyedLogger = keyedLogger.WithValues("enableFollowerScheduling", auxInfo.enableFollowerScheduling) + ctx, logger = logging.InjectLoggerValues(ctx, "enableFollowerScheduling", auxInfo.enableFollowerScheduling) if autoMigration := spec.AutoMigration; autoMigration != nil { auxInfo.unschedulableThreshold = pointer.Duration(autoMigration.Trigger.PodUnschedulableDuration.Duration) - keyedLogger = keyedLogger.WithValues("unschedulableThreshold", auxInfo.unschedulableThreshold.String()) + ctx, logger = logging.InjectLoggerValues( + ctx, + "unschedulableThreshold", + auxInfo.unschedulableThreshold.String(), + ) } } - ctx = klog.NewContext(ctx, keyedLogger) - return s.persistSchedulingResult(ctx, fedObject, *result, auxInfo) + return s.persistSchedulingResult(ctx, ftc, fedObject, *result, auxInfo) } func (s *Scheduler) prepareToSchedule( ctx context.Context, - fedObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, + ftc *fedcorev1a1.FederatedTypeConfig, ) ( fedcorev1a1.GenericPropagationPolicy, []*fedcorev1a1.FederatedCluster, *fedcorev1a1.SchedulingProfile, *worker.Result, ) { - keyedLogger := klog.FromContext(ctx) + logger := klog.FromContext(ctx) // check pending controllers if ok, err := pendingcontrollers.ControllerDependenciesFulfilled(fedObject, PrefixedGlobalSchedulerName); err != nil { - keyedLogger.Error(err, "Failed to check controller dependencies") + logger.Error(err, "Failed to check controller dependencies") return nil, nil, nil, &worker.StatusError } else if !ok { - keyedLogger.V(3).Info("Controller dependencies not fulfilled") + logger.V(3).Info("Controller dependencies not fulfilled") return nil, nil, nil, &worker.StatusAllOK } // check whether to skip scheduling - allClusters, err := 
s.clusterLister.List(labels.Everything()) + allClusters, err := s.federatedClusterInformer.Lister().List(labels.Everything()) if err != nil { - keyedLogger.Error(err, "Failed to get clusters from store") + logger.Error(err, "Failed to get clusters from store") return nil, nil, nil, &worker.StatusError } clusters := make([]*fedcorev1a1.FederatedCluster, 0) for _, cluster := range allClusters { - if util.IsClusterJoined(&cluster.Status) { + if clusterutil.IsClusterJoined(&cluster.Status) { clusters = append(clusters, cluster) } } @@ -347,13 +370,13 @@ func (s *Scheduler) prepareToSchedule( var policy fedcorev1a1.GenericPropagationPolicy var schedulingProfile *fedcorev1a1.SchedulingProfile - policyKey, hasSchedulingPolicy := MatchedPolicyKey(fedObject, s.typeConfig.GetNamespaced()) + policyKey, hasSchedulingPolicy := GetMatchedPolicyKey(fedObject) if hasSchedulingPolicy { - keyedLogger = keyedLogger.WithValues("policy", policyKey.String()) + ctx, logger = logging.InjectLoggerValues(ctx, "policy", policyKey.String()) if policy, err = s.policyFromStore(policyKey); err != nil { - keyedLogger.Error(err, "Failed to find matched policy") + logger.Error(err, "Failed to find matched policy") if apierrors.IsNotFound(err) { // do not retry since the object will be reenqueued after the policy is subsequently created // emit an event to warn users that the assigned propagation policy does not exist @@ -361,7 +384,7 @@ func (s *Scheduler) prepareToSchedule( fedObject, corev1.EventTypeWarning, EventReasonScheduleFederatedObject, - "object propagation policy %s not found", + "PropagationPolicy %s not found", policyKey.String(), ) return nil, nil, nil, &worker.StatusAllOK @@ -371,15 +394,15 @@ func (s *Scheduler) prepareToSchedule( profileName := policy.GetSpec().SchedulingProfile if len(profileName) > 0 { - keyedLogger = keyedLogger.WithValues("profile", profileName) - schedulingProfile, err = s.schedulingProfileLister.Get(profileName) + ctx, logger = logging.InjectLoggerValues(ctx, "profile", profileName) + schedulingProfile, err = s.schedulingProfileInformer.Lister().Get(profileName) if err != nil { - keyedLogger.Error(err, "Failed to get scheduling profile") + logger.Error(err, "Failed to get scheduling profile") s.eventRecorder.Eventf( fedObject, corev1.EventTypeWarning, EventReasonScheduleFederatedObject, - "failed to schedule object: %v", + "Failed to schedule object: %v", fmt.Errorf("failed to get scheduling profile %s: %w", profileName, err), ) @@ -392,15 +415,15 @@ func (s *Scheduler) prepareToSchedule( } } - triggerHash, err := s.computeSchedulingTriggerHash(fedObject, policy, clusters) + triggerHash, err := s.computeSchedulingTriggerHash(ftc, fedObject, policy, clusters) if err != nil { - keyedLogger.Error(err, "Failed to compute scheduling trigger hash") + logger.Error(err, "Failed to compute scheduling trigger hash") return nil, nil, nil, &worker.StatusError } - triggersChanged, err := annotationutil.AddAnnotation(fedObject, SchedulingTriggerHashAnnotation, triggerHash) + triggersChanged, err := annotation.AddAnnotation(fedObject, SchedulingTriggerHashAnnotation, triggerHash) if err != nil { - keyedLogger.Error(err, "Failed to update scheduling trigger hash") + logger.Error(err, "Failed to update scheduling trigger hash") return nil, nil, nil, &worker.StatusError } @@ -408,11 +431,11 @@ func (s *Scheduler) prepareToSchedule( if !triggersChanged { // scheduling triggers have not changed, skip scheduling shouldSkipScheduling = true - keyedLogger.V(3).Info("Scheduling triggers not changed, skip 
scheduling") + logger.V(3).Info("Scheduling triggers not changed, skip scheduling") } else if len(fedObject.GetAnnotations()[common.NoSchedulingAnnotation]) > 0 { // skip scheduling if no-scheduling annotation is found shouldSkipScheduling = true - keyedLogger.V(3).Info("No-scheduling annotation found, skip scheduling") + logger.V(3).Info("no-scheduling annotation found, skip scheduling") s.eventRecorder.Eventf( fedObject, corev1.EventTypeNormal, @@ -422,14 +445,17 @@ func (s *Scheduler) prepareToSchedule( } if shouldSkipScheduling { - if updated, err := s.updatePendingControllers(fedObject, false); err != nil { - keyedLogger.Error(err, "Failed to update pending controllers") + if updated, err := s.updatePendingControllers(ftc, fedObject, false); err != nil { + logger.Error(err, "Failed to update pending controllers") return nil, nil, nil, &worker.StatusError } else if updated { - if _, err := s.federatedObjectClient.Namespace(fedObject.GetNamespace()).Update( - ctx, fedObject, metav1.UpdateOptions{}, + if _, err := fedobjectadapters.Update( + ctx, + s.fedClient.CoreV1alpha1(), + fedObject, + metav1.UpdateOptions{}, ); err != nil { - keyedLogger.Error(err, "Failed to update pending controllers") + logger.Error(err, "Failed to update pending controllers") if apierrors.IsConflict(err) { return nil, nil, nil, &worker.StatusConflict } @@ -445,46 +471,45 @@ func (s *Scheduler) prepareToSchedule( func (s *Scheduler) schedule( ctx context.Context, - fedObject *unstructured.Unstructured, + ftc *fedcorev1a1.FederatedTypeConfig, + fedObject fedcorev1a1.GenericFederatedObject, policy fedcorev1a1.GenericPropagationPolicy, schedulingProfile *fedcorev1a1.SchedulingProfile, clusters []*fedcorev1a1.FederatedCluster, ) (*core.ScheduleResult, *worker.Result) { - keyedLogger := klog.FromContext(ctx) + logger := klog.FromContext(ctx) if policy == nil { // deschedule the federated object if there is no policy attached - keyedLogger.V(2).Info("No policy specified, scheduling to no clusters") + logger.V(2).Info("No policy specified, scheduling to no clusters") s.eventRecorder.Eventf( fedObject, corev1.EventTypeNormal, EventReasonScheduleFederatedObject, - "no scheduling policy specified, will schedule object to no clusters", + "No scheduling policy specified, will schedule object to no clusters", ) - return &core.ScheduleResult{ - SuggestedClusters: make(map[string]*int64), - }, nil + return &core.ScheduleResult{SuggestedClusters: make(map[string]*int64)}, nil } // schedule according to matched policy - keyedLogger.V(2).Info("Matched policy found, start scheduling") + logger.V(2).Info("Matched policy found, start scheduling") s.eventRecorder.Eventf( fedObject, corev1.EventTypeNormal, EventReasonScheduleFederatedObject, - "scheduling policy %s specified, scheduling object", + "Scheduling policy %s specified, scheduling object", common.NewQualifiedName(policy).String(), ) - schedulingUnit, err := schedulingUnitForFedObject(s.typeConfig, fedObject, policy) + schedulingUnit, err := schedulingUnitForFedObject(ftc, fedObject, policy) if err != nil { - keyedLogger.Error(err, "Failed to get scheduling unit") + logger.Error(err, "Failed to get scheduling unit") s.eventRecorder.Eventf( fedObject, corev1.EventTypeWarning, EventReasonScheduleFederatedObject, - "failed to schedule object: %v", + "Failed to schedule object: %v", fmt.Errorf("failed to get scheduling unit: %w", err), ) return nil, &worker.StatusError @@ -492,27 +517,27 @@ func (s *Scheduler) schedule( framework, err := s.createFramework(schedulingProfile, 
s.buildFrameworkHandle()) if err != nil { - keyedLogger.Error(err, "Failed to construct scheduling profile") + logger.Error(err, "Failed to construct scheduling profile") s.eventRecorder.Eventf( fedObject, corev1.EventTypeWarning, EventReasonScheduleFederatedObject, - "failed to schedule object: %v", + "Failed to schedule object: %v", fmt.Errorf("failed to construct scheduling profile: %w", err), ) return nil, &worker.StatusError } - ctx = klog.NewContext(ctx, keyedLogger) + ctx = klog.NewContext(ctx, logger) result, err := s.algorithm.Schedule(ctx, framework, *schedulingUnit, clusters) if err != nil { - keyedLogger.Error(err, "Failed to compute scheduling result") + logger.Error(err, "Failed to compute scheduling result") s.eventRecorder.Eventf( fedObject, corev1.EventTypeWarning, EventReasonScheduleFederatedObject, - "failed to schedule object: %v", + "Failed to schedule object: %v", fmt.Errorf("failed to compute scheduling result: %w", err), ) return nil, &worker.StatusError @@ -523,15 +548,16 @@ func (s *Scheduler) schedule( func (s *Scheduler) persistSchedulingResult( ctx context.Context, - fedObject *unstructured.Unstructured, + ftc *fedcorev1a1.FederatedTypeConfig, + fedObject fedcorev1a1.GenericFederatedObject, result core.ScheduleResult, auxInfo *auxiliarySchedulingInformation, ) worker.Result { - keyedLogger := klog.FromContext(ctx) + logger := klog.FromContext(ctx) - schedulingResultsChanged, err := s.applySchedulingResult(fedObject, result, auxInfo) + schedulingResultsChanged, err := s.applySchedulingResult(ftc, fedObject, result, auxInfo) if err != nil { - keyedLogger.Error(err, "Failed to apply scheduling result") + logger.Error(err, "Failed to apply scheduling result") s.eventRecorder.Eventf( fedObject, corev1.EventTypeWarning, @@ -541,9 +567,9 @@ func (s *Scheduler) persistSchedulingResult( ) return worker.StatusError } - _, err = s.updatePendingControllers(fedObject, schedulingResultsChanged) + _, err = s.updatePendingControllers(ftc, fedObject, schedulingResultsChanged) if err != nil { - keyedLogger.Error(err, "Failed to update pending controllers") + logger.Error(err, "Failed to update pending controllers") s.eventRecorder.Eventf( fedObject, corev1.EventTypeWarning, @@ -556,13 +582,14 @@ func (s *Scheduler) persistSchedulingResult( // We always update the federated object because the fact that scheduling even occurred minimally implies that the // scheduling trigger hash must have changed. - keyedLogger.V(1).Info("Updating federated object") - if _, err := s.federatedObjectClient.Namespace(fedObject.GetNamespace()).Update( + logger.V(1).Info("Updating federated object") + if _, err := fedobjectadapters.Update( ctx, + s.fedClient.CoreV1alpha1(), fedObject, metav1.UpdateOptions{}, ); err != nil { - keyedLogger.Error(err, "Failed to update federated object") + logger.Error(err, "Failed to update federated object") if apierrors.IsConflict(err) { return worker.StatusConflict } @@ -576,7 +603,7 @@ func (s *Scheduler) persistSchedulingResult( return worker.StatusError } - keyedLogger.V(1).Info("Updated federated object") + logger.V(1).Info("Updated federated object") s.eventRecorder.Eventf( fedObject, corev1.EventTypeNormal, @@ -588,38 +615,19 @@ func (s *Scheduler) persistSchedulingResult( return worker.StatusAllOK } -// federatedObjectFromStore uses the given qualified name to retrieve a federated object from the scheduler's lister, it will help to -// resolve the object's scope and namespace based on the scheduler's type config. 
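The helper removed below branched on the type config to pick a scope-specific lister; the unified replacement used in the new reconcile is fedobjectadapters.GetFromLister, where the namespace argument alone decides which lister serves the get. A minimal sketch of the call (the lister parameters are assumptions, wired the same way the scheduler wires its informers):

package example

import (
	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1"
	"github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters"
)

// getFederatedObject retrieves either a FederatedObject (namespace non-empty) or a
// ClusterFederatedObject (namespace empty) behind one generic interface.
func getFederatedObject(
	fedObjectLister fedcorev1a1listers.FederatedObjectLister,
	clusterFedObjectLister fedcorev1a1listers.ClusterFederatedObjectLister,
	namespace, name string,
) (fedcorev1a1.GenericFederatedObject, error) {
	return fedobjectadapters.GetFromLister(fedObjectLister, clusterFedObjectLister, namespace, name)
}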
-func (s *Scheduler) federatedObjectFromStore(qualifiedName common.QualifiedName) (*unstructured.Unstructured, error) { - var obj pkgruntime.Object - var err error - - if s.typeConfig.GetNamespaced() { - obj, err = s.federatedObjectLister.ByNamespace(qualifiedName.Namespace).Get(qualifiedName.Name) - } else { - obj, err = s.federatedObjectLister.Get(qualifiedName.Name) - } - - return obj.(*unstructured.Unstructured), err -} - -// policyFromStore uses the given qualified name to retrieve a policy from the scheduler's policy listers. -func (s *Scheduler) policyFromStore(qualifiedName common.QualifiedName) (fedcorev1a1.GenericPropagationPolicy, error) { - if len(qualifiedName.Namespace) > 0 { - return s.propagationPolicyLister.PropagationPolicies(qualifiedName.Namespace).Get(qualifiedName.Name) - } - return s.clusterPropagationPolicyLister.Get(qualifiedName.Name) -} - -// updatePendingControllers removes the scheduler from the object's pending controller annotation. If wasModified is true (the scheduling -// result was not modified), it will additionally set the downstream processors to notify them to reconcile the changes made by the -// scheduler. -func (s *Scheduler) updatePendingControllers(fedObject *unstructured.Unstructured, wasModified bool) (bool, error) { +// updatePendingControllers removes the scheduler from the object's pending controller annotation. If wasModified is +// true (the scheduling result was modified), it will additionally set the downstream processors to notify them to +// reconcile the changes made by the scheduler. +func (s *Scheduler) updatePendingControllers( + ftc *fedcorev1a1.FederatedTypeConfig, + fedObject fedcorev1a1.GenericFederatedObject, + wasModified bool, +) (bool, error) { return pendingcontrollers.UpdatePendingControllers( fedObject, PrefixedGlobalSchedulerName, wasModified, - s.typeConfig.GetControllers(), + ftc.GetControllers(), ) } @@ -628,37 +636,38 @@ type auxiliarySchedulingInformation struct { unschedulableThreshold *time.Duration } -// applySchedulingResult updates the federated object with the scheduling result and the enableFollowerScheduling annotation, it returns a -// bool indicating if the scheduling result has changed. +// applySchedulingResult updates the federated object with the scheduling result and the enableFollowerScheduling +// annotation. It returns a bool indicating if the scheduling result has changed. func (s *Scheduler) applySchedulingResult( - fedObject *unstructured.Unstructured, + ftc *fedcorev1a1.FederatedTypeConfig, + fedObject fedcorev1a1.GenericFederatedObject, result core.ScheduleResult, auxInfo *auxiliarySchedulingInformation, ) (bool, error) { objectModified := false clusterSet := result.ClusterSet() - // set placements - placementUpdated, err := util.SetPlacementClusterNames(fedObject, PrefixedGlobalSchedulerName, clusterSet) - if err != nil { - return false, err - } + // 1. Set placements + + placementUpdated := fedObject.GetSpec().SetControllerPlacement(PrefixedGlobalSchedulerName, sets.List(clusterSet)) objectModified = objectModified || placementUpdated - // set replicas overrides + // 2.
Set replicas overrides + desiredOverrides := map[string]int64{} for clusterName, replicaCount := range result.SuggestedClusters { if replicaCount != nil { desiredOverrides[clusterName] = *replicaCount } } - overridesUpdated, err := UpdateReplicasOverride(s.typeConfig, fedObject, desiredOverrides) + overridesUpdated, err := UpdateReplicasOverride(ftc, fedObject, desiredOverrides) if err != nil { return false, err } objectModified = objectModified || overridesUpdated - // set annotations + // 3. Set annotations + annotations := fedObject.GetAnnotations() if annotations == nil { annotations = make(map[string]string, 2) @@ -694,3 +703,132 @@ func (s *Scheduler) applySchedulingResult( return objectModified, nil } + +func (s *Scheduler) enqueueFederatedObjectsForPolicy(policy metav1.Object) { + policyAccessor, ok := policy.(fedcorev1a1.GenericPropagationPolicy) + if !ok { + s.logger.Error( + fmt.Errorf("policy is not a valid type (%T)", policy), + "Failed to enqueue federated object for policy", + ) + return + } + + policyKey := common.NewQualifiedName(policyAccessor) + logger := s.logger.WithValues("policy", policyKey.String()) + logger.V(2).Info("Enqueue FederatedObjects and ClusterFederatedObjects for policy") + + allObjects := []metav1.Object{} + + if len(policyKey.Namespace) > 0 { + // If the policy is namespaced, we only need to scan FederatedObjects in the same namespace. + fedObjects, err := s.fedObjectInformer.Lister().FederatedObjects(policyKey.Namespace).List(labels.Everything()) + if err != nil { + logger.Error(err, "Failed to enqueue FederatedObjects for policy") + return + } + for _, obj := range fedObjects { + allObjects = append(allObjects, obj) + } + } else { + // If the policy is cluster-scoped, we need to scan all FederatedObjects and ClusterFederatedObjects. + fedObjects, err := s.fedObjectInformer.Lister().List(labels.Everything()) + if err != nil { + logger.Error(err, "Failed to enqueue FederatedObjects for policy") + return + } + for _, obj := range fedObjects { + allObjects = append(allObjects, obj) + } + + clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) + if err != nil { + logger.Error(err, "Failed to enqueue ClusterFederatedObjects for policy") + return + } + for _, obj := range clusterFedObjects { + allObjects = append(allObjects, obj) + } + } + + for _, obj := range allObjects { + if policyKey, found := GetMatchedPolicyKey(obj); !found { + continue + } else if policyKey.Name == policyAccessor.GetName() && policyKey.Namespace == policyAccessor.GetNamespace() { + s.worker.EnqueueObject(obj) + } + } +} + +func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.FederatedCluster) { + logger := s.logger.WithValues("cluster", cluster.GetName()) + + if !clusterutil.IsClusterJoined(&cluster.Status) { + logger.V(3).
+ Info("Skip enqueue federated objects for cluster, cluster not joined") + return + } + + logger.V(2).Info("Enqueue federated objects for cluster") + + fedObjects, err := s.fedObjectInformer.Lister().List(labels.Everything()) + if err != nil { + s.logger.Error(err, "Failed to enquue FederatedObjects for policy") + return + } + for _, obj := range fedObjects { + s.worker.EnqueueObject(obj) + } + clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) + if err != nil { + s.logger.Error(err, "Failed to enquue ClusterFederatedObjects for policy") + return + } + for _, obj := range clusterFedObjects { + s.worker.EnqueueObject(obj) + } +} + +func (s *Scheduler) enqueueFederatedObjectsForFTC(ftc *fedcorev1a1.FederatedTypeConfig) { + logger := s.logger.WithValues("ftc", ftc.GetName()) + + logger.V(2).Info("Enqueue federated objects for FTC") + + allObjects := []fedcorev1a1.GenericFederatedObject{} + fedObjects, err := s.fedObjectInformer.Lister().List(labels.Everything()) + if err != nil { + s.logger.Error(err, "Failed to enquue FederatedObjects for policy") + return + } + for _, obj := range fedObjects { + allObjects = append(allObjects, obj) + } + clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) + if err != nil { + s.logger.Error(err, "Failed to enquue ClusterFederatedObjects for policy") + return + } + for _, obj := range clusterFedObjects { + allObjects = append(allObjects, obj) + } + + for _, obj := range allObjects { + sourceGVK, err := obj.GetSpec().GetTemplateGVK() + if err != nil { + s.logger.Error(err, "Failed to get source GVK from FederatedObject, will not enqueue") + continue + } + if sourceGVK == ftc.GetSourceTypeGVK() { + s.worker.EnqueueObject(obj) + } + } +} + +// policyFromStore uses the given qualified name to retrieve a policy from the scheduler's policy listers. +func (s *Scheduler) policyFromStore(qualifiedName common.QualifiedName) (fedcorev1a1.GenericPropagationPolicy, error) { + if len(qualifiedName.Namespace) > 0 { + return s.propagationPolicyInformer.Lister().PropagationPolicies(qualifiedName.Namespace).Get(qualifiedName.Name) + } + return s.clusterPropagationPolicyInformer.Lister().Get(qualifiedName.Name) +} diff --git a/pkg/controllers/scheduler/schedulingtriggers.go b/pkg/controllers/scheduler/schedulingtriggers.go index e1e1bab8..118a624d 100644 --- a/pkg/controllers/scheduler/schedulingtriggers.go +++ b/pkg/controllers/scheduler/schedulingtriggers.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
@@ -26,16 +25,12 @@ import (
 	"golang.org/x/exp/constraints"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/labels"
-	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/framework"
-	"github.com/kubewharf/kubeadmiral/pkg/controllers/util"
-	utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured"
+	unstructuredutil "github.com/kubewharf/kubeadmiral/pkg/util/unstructured"
 )
 
 /*
@@ -105,7 +100,8 @@ type schedulingTriggers struct {
 }
 
 func (s *Scheduler) computeSchedulingTriggerHash(
-	fedObject *unstructured.Unstructured,
+	ftc *fedcorev1a1.FederatedTypeConfig,
+	fedObject fedcorev1a1.GenericFederatedObject,
 	policy fedcorev1a1.GenericPropagationPolicy,
 	clusters []*fedcorev1a1.FederatedCluster,
 ) (string, error) {
@@ -114,7 +110,7 @@ func (s *Scheduler) computeSchedulingTriggerHash(
 	var err error
 
 	trigger.SchedulingAnnotations = getSchedulingAnnotations(fedObject)
-	if trigger.ReplicaCount, err = getReplicaCount(s.typeConfig, fedObject); err != nil {
+	if trigger.ReplicaCount, err = getReplicaCount(ftc, fedObject); err != nil {
 		return "", fmt.Errorf("failed to get object replica count: %w", err)
 	}
 	trigger.ResourceRequest = getResourceRequest(fedObject)
@@ -159,7 +155,7 @@ var knownSchedulingAnnotations = sets.New(
 	FollowsObjectAnnotation,
 )
 
-func getSchedulingAnnotations(fedObject *unstructured.Unstructured) []keyValue[string, string] {
+func getSchedulingAnnotations(fedObject fedcorev1a1.GenericFederatedObject) []keyValue[string, string] {
 	annotations := fedObject.GetAnnotations() // this is a deep copy
 	for k := range annotations {
 		if !knownSchedulingAnnotations.Has(k) {
@@ -169,16 +165,20 @@ func getSchedulingAnnotations(fedObject *unstructured.Unstructured) []keyValue[s
 	return sortMap(annotations)
 }
 
-func getReplicaCount(typeConfig *fedcorev1a1.FederatedTypeConfig, fedObject *unstructured.Unstructured) (int64, error) {
-	if len(typeConfig.Spec.PathDefinition.ReplicasSpec) == 0 {
+func getReplicaCount(
+	ftc *fedcorev1a1.FederatedTypeConfig,
+	fedObject fedcorev1a1.GenericFederatedObject,
+) (int64, error) {
+	if len(ftc.Spec.PathDefinition.ReplicasSpec) == 0 {
 		return 0, nil
 	}
 
-	value, err := utilunstructured.GetInt64FromPath(
-		fedObject,
-		typeConfig.Spec.PathDefinition.ReplicasSpec,
-		common.TemplatePath,
-	)
+	template, err := fedObject.GetSpec().GetTemplateAsUnstructured()
+	if err != nil {
+		return 0, err
+	}
+
+	value, err := unstructuredutil.GetInt64FromPath(template, ftc.Spec.PathDefinition.ReplicasSpec, nil)
 	if err != nil || value == nil {
 		return 0, err
 	}
@@ -186,7 +186,7 @@ func getReplicaCount(typeConfig *fedcorev1a1.FederatedTypeConfig, fedObject *uns
 	return *value, nil
 }
 
-func getResourceRequest(fedObject *unstructured.Unstructured) framework.Resource {
+func getResourceRequest(fedObject fedcorev1a1.GenericFederatedObject) framework.Resource {
 	// TODO: update once we have a proper way to obtain resource request from federated objects
 	return framework.Resource{}
 }
@@ -231,7 +231,9 @@ func getClusterTaints(clusters []*fedcorev1a1.FederatedCluster) []keyValue[strin
 	return sortMap(ret)
 }
 
-func getClusterAPIResourceTypes(clusters []*fedcorev1a1.FederatedCluster) []keyValue[string, []fedcorev1a1.APIResource] {
+func getClusterAPIResourceTypes(
+	clusters
[]*fedcorev1a1.FederatedCluster, +) []keyValue[string, []fedcorev1a1.APIResource] { ret := make(map[string][]fedcorev1a1.APIResource) for _, cluster := range clusters { @@ -261,53 +263,3 @@ func getClusterAPIResourceTypes(clusters []*fedcorev1a1.FederatedCluster) []keyV } return sortMap(ret) } - -// enqueueFederatedObjectsForPolicy enqueues federated objects which match the policy -func (s *Scheduler) enqueueFederatedObjectsForPolicy(policy pkgruntime.Object) { - policyAccessor, ok := policy.(fedcorev1a1.GenericPropagationPolicy) - if !ok { - s.logger.Error(fmt.Errorf("policy is not a valid type (%T)", policy), "Failed to enqueue federated object for policy") - return - } - - s.logger.WithValues("policy", policyAccessor.GetName()).V(2).Info("Enqueue federated objects for policy") - - fedObjects, err := s.federatedObjectLister.List(labels.Everything()) - if err != nil { - s.logger.WithValues("policy", policyAccessor.GetName()).Error(err, "Failed to enqueue federated objects for policy") - return - } - - for _, fedObject := range fedObjects { - fedObject := fedObject.(*unstructured.Unstructured) - policyKey, found := MatchedPolicyKey(fedObject, s.typeConfig.GetNamespaced()) - if !found { - continue - } - - if policyKey.Name == policyAccessor.GetName() && policyKey.Namespace == policyAccessor.GetNamespace() { - s.worker.EnqueueObject(fedObject) - } - } -} - -// enqueueFederatedObjectsForCluster enqueues all federated objects only if the cluster is joined -func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster pkgruntime.Object) { - clusterObj := cluster.(*fedcorev1a1.FederatedCluster) - if !util.IsClusterJoined(&clusterObj.Status) { - s.logger.WithValues("cluster", clusterObj.Name).V(3).Info("Skip enqueue federated objects for cluster, cluster not joined") - return - } - - s.logger.WithValues("cluster", clusterObj.Name).V(2).Info("Enqueue federated objects for cluster") - - fedObjects, err := s.federatedObjectLister.List(labels.Everything()) - if err != nil { - s.logger.Error(err, "Failed to enqueue federated object for cluster") - return - } - - for _, fedObject := range fedObjects { - s.worker.EnqueueObject(fedObject) - } -} diff --git a/pkg/controllers/scheduler/schedulingunit.go b/pkg/controllers/scheduler/schedulingunit.go index 6e2f3cb6..a69fc87f 100644 --- a/pkg/controllers/scheduler/schedulingunit.go +++ b/pkg/controllers/scheduler/schedulingunit.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
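With the template now read via GetTemplateAsUnstructured, the replicas lookup in the hunks below reduces to resolving a field path against an unstructured map. A rough sketch of such a lookup, assuming a dot-separated ReplicasSpec path like "spec.replicas" (the real GetInt64FromPath helper additionally handles slash paths and an optional path prefix):

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// getInt64FromPath is a simplified stand-in for the helper used above. It
// returns nil when the field is absent, which lets callers distinguish a
// missing replicas field from an explicit zero.
func getInt64FromPath(obj *unstructured.Unstructured, path string) (*int64, error) {
	fields := strings.Split(path, ".")
	value, found, err := unstructured.NestedInt64(obj.Object, fields...)
	if err != nil || !found {
		return nil, err
	}
	return &value, nil
}

func main() {
	// An illustrative template, shaped like what GetTemplateAsUnstructured
	// would return for a Deployment.
	template := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"spec":       map[string]interface{}{"replicas": int64(3)},
	}}

	replicas, err := getInt64FromPath(template, "spec.replicas")
	if err != nil {
		panic(err)
	}
	if replicas != nil {
		fmt.Println(*replicas) // 3
	}
}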
@@ -23,27 +22,23 @@ import ( "strconv" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/framework" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" + unstructuredutil "github.com/kubewharf/kubeadmiral/pkg/util/unstructured" ) func schedulingUnitForFedObject( typeConfig *fedcorev1a1.FederatedTypeConfig, - fedObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, policy fedcorev1a1.GenericPropagationPolicy, ) (*framework.SchedulingUnit, error) { - template, err := getTemplate(fedObject) + template, err := fedObject.GetSpec().GetTemplateAsUnstructured() if err != nil { - return nil, fmt.Errorf("error retrieving template: %w", err) + return nil, fmt.Errorf("error retrieving template from FederatedObject: %w", err) } schedulingMode := getSchedulingModeFromPolicy(policy) @@ -58,11 +53,7 @@ func schedulingUnitForFedObject( schedulingMode = fedcorev1a1.SchedulingModeDuplicate } if schedulingMode == fedcorev1a1.SchedulingModeDivide { - value, err := utilunstructured.GetInt64FromPath( - fedObject, - typeConfig.Spec.PathDefinition.ReplicasSpec, - common.TemplatePath, - ) + value, err := unstructuredutil.GetInt64FromPath(template, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) if err != nil { return nil, err } @@ -75,11 +66,11 @@ func schedulingUnitForFedObject( return nil, err } - targetType := typeConfig.GetTargetType() + sourceType := typeConfig.GetSourceType() schedulingUnit := &framework.SchedulingUnit{ - GroupVersion: schema.GroupVersion{Group: targetType.Group, Version: targetType.Version}, - Kind: targetType.Kind, - Resource: targetType.Name, + GroupVersion: schema.GroupVersion{Group: sourceType.Group, Version: sourceType.Version}, + Kind: sourceType.Kind, + Resource: sourceType.Name, Namespace: template.GetNamespace(), Name: template.GetName(), Labels: template.GetLabels(), @@ -163,57 +154,33 @@ func schedulingUnitForFedObject( return schedulingUnit, nil } -func getTemplate(fedObject *unstructured.Unstructured) (*metav1.PartialObjectMetadata, error) { - templateContent, exists, err := unstructured.NestedMap(fedObject.Object, common.TemplatePath...) 
- if err != nil { - return nil, fmt.Errorf("error retrieving template: %w", err) - } - if !exists { - return nil, fmt.Errorf("template not found") - } - obj := metav1.PartialObjectMetadata{} - err = runtime.DefaultUnstructuredConverter.FromUnstructured(templateContent, &obj) - if err != nil { - return nil, fmt.Errorf("template cannot be converted from unstructured: %w", err) - } - return &obj, nil -} - func getCurrentReplicasFromObject( - typeConfig *fedcorev1a1.FederatedTypeConfig, - object *unstructured.Unstructured, + ftc *fedcorev1a1.FederatedTypeConfig, + fedObject fedcorev1a1.GenericFederatedObject, ) (map[string]*int64, error) { - placementObj, err := util.UnmarshalGenericPlacements(object) - if err != nil { - return nil, err - } + placements := fedObject.GetSpec().GetControllerPlacement(PrefixedGlobalSchedulerName) + overrides := fedObject.GetSpec().GetControllerOverrides(PrefixedGlobalSchedulerName) - var clusterNames map[string]struct{} - if placement := placementObj.Spec.GetPlacementOrNil(PrefixedGlobalSchedulerName); placement != nil { - clusterNames = placement.ClusterNames() - } + res := make(map[string]*int64, len(placements)) - clusterOverridesMap, err := util.GetOverrides(object, PrefixedGlobalSchedulerName) - if err != nil { - return nil, err + for _, placement := range placements { + res[placement.Cluster] = nil } - res := make(map[string]*int64, len(clusterNames)) - for cluster := range clusterNames { - res[cluster] = nil + replicasPath := unstructuredutil.ToSlashPath(ftc.Spec.PathDefinition.ReplicasSpec) - clusterOverrides, exists := clusterOverridesMap[cluster] - if !exists { + for _, override := range overrides { + if _, ok := res[override.Cluster]; !ok { continue } - for _, override := range clusterOverrides { - if override.Path == utilunstructured.ToSlashPath(typeConfig.Spec.PathDefinition.ReplicasSpec) && - (override.Op == operationReplace || override.Op == "") { - // The type of the value will be float64 due to how json - // marshalling works for interfaces. 
-				replicas := int64(override.Value.(float64))
-				res[cluster] = &replicas
+		for _, patch := range override.Patches {
+			if patch.Path == replicasPath && (patch.Op == overridePatchOpReplace || patch.Op == "") {
+				var replicas *int64
+				if err := json.Unmarshal(patch.Value.Raw, &replicas); err != nil {
+					continue
+				}
+				res[override.Cluster] = replicas
 				break
 			}
 		}
@@ -232,8 +199,8 @@ func getSchedulingModeFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) fe
 	return DefaultSchedulingMode
 }
 
-func getSchedulingModeFromObject(object *unstructured.Unstructured) (fedcorev1a1.SchedulingMode, bool) {
-	annotations := object.GetAnnotations()
+func getSchedulingModeFromObject(fedObject fedcorev1a1.GenericFederatedObject) (fedcorev1a1.SchedulingMode, bool) {
+	annotations := fedObject.GetAnnotations()
 	if annotations == nil {
 		return "", false
 	}
@@ -254,12 +221,12 @@ func getSchedulingModeFromObject(object *unstructured.Unstructured) (fedcorev1a1
 		"Invalid value %s for scheduling mode annotation (%s) on fed object %s",
 		annotation,
 		SchedulingModeAnnotation,
-		object.GetName(),
+		fedObject.GetName(),
 	)
 	return "", false
 }
 
-func getAutoMigrationInfo(fedObject *unstructured.Unstructured) (*framework.AutoMigrationInfo, error) {
+func getAutoMigrationInfo(fedObject fedcorev1a1.GenericFederatedObject) (*framework.AutoMigrationInfo, error) {
 	value, exists := fedObject.GetAnnotations()[common.AutoMigrationInfoAnnotation]
 	if !exists {
 		return nil, nil
@@ -276,7 +243,7 @@ func getIsStickyClusterFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) b
 	return policy.GetSpec().StickyCluster
 }
 
-func getIsStickyClusterFromObject(object *unstructured.Unstructured) (bool, bool) {
+func getIsStickyClusterFromObject(object fedcorev1a1.GenericFederatedObject) (bool, bool) {
 	// TODO: consider passing in the annotations directly to prevent incurring a deep copy for each call
 	annotations := object.GetAnnotations()
 	if annotations == nil {
@@ -308,7 +275,7 @@ func getClusterSelectorFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) m
 	return policy.GetSpec().ClusterSelector
 }
 
-func getClusterSelectorFromObject(object *unstructured.Unstructured) (map[string]string, bool) {
+func getClusterSelectorFromObject(object fedcorev1a1.GenericFederatedObject) (map[string]string, bool) {
 	annotations := object.GetAnnotations()
 	if annotations == nil {
 		return nil, false
@@ -351,7 +318,7 @@ func getAffinityFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) *framewo
 	return affinity
 }
 
-func getAffinityFromObject(object *unstructured.Unstructured) (*framework.Affinity, bool) {
+func getAffinityFromObject(object fedcorev1a1.GenericFederatedObject) (*framework.Affinity, bool) {
 	annotations := object.GetAnnotations()
 	if annotations == nil {
 		return nil, false
@@ -381,7 +348,7 @@ func getTolerationsFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) []cor
 	return policy.GetSpec().Tolerations
 }
 
-func getTolerationsFromObject(object *unstructured.Unstructured) ([]corev1.Toleration, bool) {
+func getTolerationsFromObject(object fedcorev1a1.GenericFederatedObject) ([]corev1.Toleration, bool) {
 	annotations := object.GetAnnotations()
 	if annotations == nil {
 		return nil, false
@@ -411,7 +378,7 @@ func getMaxClustersFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) *int6
 	return policy.GetSpec().MaxClusters
 }
 
-func getMaxClustersFromObject(object *unstructured.Unstructured) (*int64, bool) {
+func getMaxClustersFromObject(object fedcorev1a1.GenericFederatedObject) (*int64, bool) {
 	annotations := object.GetAnnotations()
 	if annotations == nil {
 		return
nil, false @@ -463,7 +430,7 @@ func getWeightsFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) map[strin return weights } -func getWeightsFromObject(object *unstructured.Unstructured) (map[string]int64, bool) { +func getWeightsFromObject(object fedcorev1a1.GenericFederatedObject) (map[string]int64, bool) { annotations := object.GetAnnotations() if annotations == nil { return nil, false @@ -474,7 +441,7 @@ func getWeightsFromObject(object *unstructured.Unstructured) (map[string]int64, return nil, false } - var placements []fedcorev1a1.ClusterReference + var placements []fedcorev1a1.DesiredPlacement err := json.Unmarshal([]byte(annotation), &placements) if err != nil { klog.Errorf( @@ -521,7 +488,7 @@ func getMinReplicasFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) map[s return minReplicas } -func getMinReplicasFromObject(object *unstructured.Unstructured) (map[string]int64, bool) { +func getMinReplicasFromObject(object fedcorev1a1.GenericFederatedObject) (map[string]int64, bool) { annotations := object.GetAnnotations() if annotations == nil { return nil, false @@ -532,7 +499,7 @@ func getMinReplicasFromObject(object *unstructured.Unstructured) (map[string]int return nil, false } - var placements []fedcorev1a1.ClusterReference + var placements []fedcorev1a1.DesiredPlacement err := json.Unmarshal([]byte(annotation), &placements) if err != nil { klog.Errorf( @@ -579,7 +546,7 @@ func getMaxReplicasFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) map[s return maxReplicas } -func getMaxReplicasFromObject(object *unstructured.Unstructured) (map[string]int64, bool) { +func getMaxReplicasFromObject(object fedcorev1a1.GenericFederatedObject) (map[string]int64, bool) { annotations := object.GetAnnotations() if annotations == nil { return nil, false @@ -590,7 +557,7 @@ func getMaxReplicasFromObject(object *unstructured.Unstructured) (map[string]int return nil, false } - var placements []fedcorev1a1.ClusterReference + var placements []fedcorev1a1.DesiredPlacement err := json.Unmarshal([]byte(annotation), &placements) if err != nil { klog.Errorf( @@ -637,7 +604,7 @@ func getClusterNamesFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) map[ return clusterNames } -func getClusterNamesFromObject(object *unstructured.Unstructured) (map[string]struct{}, bool) { +func getClusterNamesFromObject(object fedcorev1a1.GenericFederatedObject) (map[string]struct{}, bool) { annotations := object.GetAnnotations() if annotations == nil { return nil, false diff --git a/pkg/controllers/scheduler/util.go b/pkg/controllers/scheduler/util.go index 2a2b69d9..0bd63424 100644 --- a/pkg/controllers/scheduler/util.go +++ b/pkg/controllers/scheduler/util.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -18,170 +17,64 @@ limitations under the License. 
package scheduler import ( - "sync" + "fmt" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/dynamic" - - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/json" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" + "github.com/kubewharf/kubeadmiral/pkg/util/unstructured" ) const ( - operationReplace = "replace" + overridePatchOpReplace = "replace" ) -func MatchedPolicyKey(obj fedcorev1a1.GenericFederatedObject, isNamespaced bool) (result common.QualifiedName, ok bool) { +func GetMatchedPolicyKey(obj metav1.Object) (result common.QualifiedName, ok bool) { labels := obj.GetLabels() + isNamespaced := len(obj.GetNamespace()) > 0 - if policyName, exists := labels[PropagationPolicyNameLabel]; exists && isNamespaced { + if policyName, exists := labels[common.PropagationPolicyNameLabel]; exists && isNamespaced { return common.QualifiedName{Namespace: obj.GetNamespace(), Name: policyName}, true } - if policyName, exists := labels[ClusterPropagationPolicyNameLabel]; exists { + if policyName, exists := labels[common.ClusterPropagationPolicyNameLabel]; exists { return common.QualifiedName{Namespace: "", Name: policyName}, true } return common.QualifiedName{}, false } -type ClusterClients struct { - clients sync.Map -} - -func (c *ClusterClients) Get(cluster string) dynamic.Interface { - val, ok := c.clients.Load(cluster) - if ok { - return val.(dynamic.Interface) - } - return nil -} - -func (c *ClusterClients) Set(cluster string, client dynamic.Interface) { - c.clients.Store(cluster, client) -} - -func (c *ClusterClients) Delete(cluster string) { - c.clients.Delete(cluster) -} - func UpdateReplicasOverride( - typeConfig *fedcorev1a1.FederatedTypeConfig, - fedObject *unstructured.Unstructured, + ftc *fedcorev1a1.FederatedTypeConfig, + fedObject fedcorev1a1.GenericFederatedObject, result map[string]int64, ) (updated bool, err error) { - overridesMap, err := util.GetOverrides(fedObject, PrefixedGlobalSchedulerName) - if err != nil { - return updated, errors.Wrapf( - err, - "Error reading cluster overrides for %s/%s", - fedObject.GetNamespace(), - fedObject.GetName(), - ) - } + replicasPath := unstructured.ToSlashPath(ftc.Spec.PathDefinition.ReplicasSpec) - if OverrideUpdateNeeded(typeConfig, overridesMap, result) { - err := setOverrides(typeConfig, fedObject, overridesMap, result) + newOverrides := []fedcorev1a1.ClusterReferenceWithPatches{} + for cluster, replicas := range result { + replicasRaw, err := json.Marshal(replicas) if err != nil { - return updated, err + return false, fmt.Errorf("failed to marshal replicas value: %w", err) } - updated = true - } - return updated, nil -} - -func setOverrides( - typeConfig *fedcorev1a1.FederatedTypeConfig, - obj *unstructured.Unstructured, - overridesMap util.OverridesMap, - replicasMap map[string]int64, -) error { - if overridesMap == nil { - overridesMap = make(util.OverridesMap) - } - updateOverridesMap(typeConfig, overridesMap, replicasMap) - return util.SetOverrides(obj, PrefixedGlobalSchedulerName, overridesMap) -} - -func updateOverridesMap( - typeConfig *fedcorev1a1.FederatedTypeConfig, - overridesMap util.OverridesMap, - 
replicasMap map[string]int64,
-) {
-	replicasPath := utilunstructured.ToSlashPath(typeConfig.Spec.PathDefinition.ReplicasSpec)
-
-	// Remove replicas override for clusters that are not scheduled
-	for clusterName, clusterOverrides := range overridesMap {
-		if _, ok := replicasMap[clusterName]; !ok {
-			for i, overrideItem := range clusterOverrides {
-				if overrideItem.Path == replicasPath {
-					clusterOverrides = append(clusterOverrides[:i], clusterOverrides[i+1:]...)
-					if len(clusterOverrides) == 0 {
-						// delete empty ClusterOverrides item
-						delete(overridesMap, clusterName)
-					} else {
-						overridesMap[clusterName] = clusterOverrides
-					}
-					break
-				}
-			}
-		}
-	}
-	// Add/update replicas override for clusters that are scheduled
-	for clusterName, replicas := range replicasMap {
-		replicasOverrideFound := false
-		for idx, overrideItem := range overridesMap[clusterName] {
-			if overrideItem.Path == replicasPath {
-				overridesMap[clusterName][idx].Value = replicas
-				replicasOverrideFound = true
-				break
-			}
-		}
-		if !replicasOverrideFound {
-			clusterOverrides, exist := overridesMap[clusterName]
-			if !exist {
-				clusterOverrides = fedtypesv1a1.OverridePatches{}
-			}
-			clusterOverrides = append(clusterOverrides, fedtypesv1a1.OverridePatch{Path: replicasPath, Value: replicas})
-			overridesMap[clusterName] = clusterOverrides
+		override := fedcorev1a1.ClusterReferenceWithPatches{
+			Cluster: cluster,
+			Patches: fedcorev1a1.OverridePatches{
+				{
+					Op:   overridePatchOpReplace,
+					Path: replicasPath,
+					Value: v1.JSON{
+						Raw: replicasRaw,
+					},
+				},
+			},
 		}
+		newOverrides = append(newOverrides, override)
 	}
-}
-
-func OverrideUpdateNeeded(
-	typeConfig *fedcorev1a1.FederatedTypeConfig,
-	overridesMap util.OverridesMap,
-	result map[string]int64,
-) bool {
-	resultLen := len(result)
-	checkLen := 0
-	for clusterName, clusterOverridesMap := range overridesMap {
-		for _, overrideItem := range clusterOverridesMap {
-			path := overrideItem.Path
-			rawValue := overrideItem.Value
-			if path != utilunstructured.ToSlashPath(typeConfig.Spec.PathDefinition.ReplicasSpec) {
-				continue
-			}
-			// The type of the value will be float64 due to how json
-			// marshalling works for interfaces.
-			floatValue, ok := rawValue.(float64)
-			if !ok {
-				return true
-			}
-			value := int64(floatValue)
-			replicas, ok := result[clusterName]
-			if !ok || value != replicas {
-				return true
-			}
-			checkLen += 1
-		}
-	}
-
-	return checkLen != resultLen
+	updated = fedObject.GetSpec().SetControllerOverrides(PrefixedGlobalSchedulerName, newOverrides)
+	return updated, nil
 }
diff --git a/pkg/controllers/scheduler/util_test.go b/pkg/controllers/scheduler/util_test.go
index 2073c9de..88f68b9b 100644
--- a/pkg/controllers/scheduler/util_test.go
+++ b/pkg/controllers/scheduler/util_test.go
@@ -94,7 +94,7 @@ func TestMatchedPolicyKey(t *testing.T) {
 			}
 			object.SetLabels(labels)
 
-			policy, found := MatchedPolicyKey(object, object.GetNamespace() != "")
+			policy, found := GetMatchedPolicyKey(object)
 			if found != testCase.expectedPolicyFound {
 				t.Fatalf("found = %v, but expectedPolicyFound = %v", found, testCase.expectedPolicyFound)
 			}
diff --git a/pkg/controllers/scheduler/webhook.go b/pkg/controllers/scheduler/webhook.go
index ee978c51..f287d206 100644
--- a/pkg/controllers/scheduler/webhook.go
+++ b/pkg/controllers/scheduler/webhook.go
@@ -1,4 +1,3 @@
-//go:build exclude
 /*
 Copyright 2023 The KubeAdmiral Authors.
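UpdateReplicasOverride above emits one replace patch per scheduled cluster, carrying the replica count as raw JSON bytes. A small sketch of the patch document this produces, using a local stand-in for the OverridePatch type (the JSON tags are assumptions based on the usual JSON-patch field names):

package main

import (
	"encoding/json"
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// overridePatch mirrors the shape of the patches built by
// UpdateReplicasOverride; it is not the upstream type.
type overridePatch struct {
	Op    string               `json:"op,omitempty"`
	Path  string               `json:"path"`
	Value apiextensionsv1.JSON `json:"value,omitempty"`
}

func main() {
	// Marshalling the replica count first yields the raw bytes stored in the
	// patch value, as done in UpdateReplicasOverride.
	replicasRaw, err := json.Marshal(int64(3))
	if err != nil {
		panic(err)
	}

	patch := overridePatch{
		Op:    "replace",
		Path:  "/spec/replicas",
		Value: apiextensionsv1.JSON{Raw: replicasRaw},
	}

	out, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"op":"replace","path":"/spec/replicas","value":3}
}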
diff --git a/pkg/controllers/util/clusterselector/util.go b/pkg/util/clusterselector/util.go similarity index 100% rename from pkg/controllers/util/clusterselector/util.go rename to pkg/util/clusterselector/util.go diff --git a/pkg/util/eventhandlers/eventhandler.go b/pkg/util/eventhandlers/eventhandler.go index 1e2cdf5a..f8e5046a 100644 --- a/pkg/util/eventhandlers/eventhandler.go +++ b/pkg/util/eventhandlers/eventhandler.go @@ -19,6 +19,7 @@ package eventhandlers import ( "reflect" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" ) @@ -85,3 +86,37 @@ func NewTriggerOnChanges[Source any, Key any]( }, } } + +// NewTriggerOnGenerationChanges returns a cache.ResourceEventHandlerFuncs that will call the given triggerFunc on +// object generation changes. The object is first transformed with the given keyFunc. triggerFunc is also called for add +// and delete events. +func NewTriggerOnGenerationChanges[Source any, Key any]( + keyFunc func(Source) Key, + triggerFunc func(Key), +) *cache.ResourceEventHandlerFuncs { + return &cache.ResourceEventHandlerFuncs{ + DeleteFunc: func(old interface{}) { + if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok { + // This object might be stale but ok for our current usage. + old = deleted.Obj + if old == nil { + return + } + } + oldObj := old.(Source) + triggerFunc(keyFunc(oldObj)) + }, + AddFunc: func(cur interface{}) { + curObj := cur.(Source) + triggerFunc(keyFunc(curObj)) + }, + UpdateFunc: func(old, cur interface{}) { + oldObj := old.(metav1.Object) + curObj := cur.(metav1.Object) + + if oldObj.GetGeneration() != curObj.GetGeneration() { + triggerFunc(keyFunc(cur.(Source))) + } + }, + } +} diff --git a/pkg/util/fedobjectadapters/adapters.go b/pkg/util/fedobjectadapters/adapters.go index c7761adf..d4c7c945 100644 --- a/pkg/util/fedobjectadapters/adapters.go +++ b/pkg/util/fedobjectadapters/adapters.go @@ -10,7 +10,6 @@ import ( fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" ) func ensureNilInterface( @@ -127,11 +126,11 @@ func Delete( func MatchedPolicyKey(obj fedcorev1a1.GenericFederatedObject, isNamespaced bool) (result common.QualifiedName, ok bool) { labels := obj.GetLabels() - if policyName, exists := labels[scheduler.PropagationPolicyNameLabel]; exists && isNamespaced { + if policyName, exists := labels[common.PropagationPolicyNameLabel]; exists && isNamespaced { return common.QualifiedName{Namespace: obj.GetNamespace(), Name: policyName}, true } - if policyName, exists := labels[scheduler.ClusterPropagationPolicyNameLabel]; exists { + if policyName, exists := labels[common.ClusterPropagationPolicyNameLabel]; exists { return common.QualifiedName{Namespace: "", Name: policyName}, true } diff --git a/pkg/controllers/util/unstructured/unstructured.go b/pkg/util/unstructured/unstructured.go similarity index 98% rename from pkg/controllers/util/unstructured/unstructured.go rename to pkg/util/unstructured/unstructured.go index f1ab977f..de9de421 100644 --- a/pkg/controllers/util/unstructured/unstructured.go +++ b/pkg/util/unstructured/unstructured.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -15,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package utilunstructured +package unstructured import ( "fmt" From 89c37206b63c2d84aacdff1f3bd4f55f744be991 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Sat, 22 Jul 2023 20:19:56 +0800 Subject: [PATCH 094/173] fix(scheduler): trigger schedulingprofile informer start --- pkg/controllers/federate/controller.go | 2 -- pkg/controllers/scheduler/scheduler.go | 6 ++++-- pkg/controllers/scheduler/schedulingtriggers.go | 10 +++++----- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index bf6ea4a0..63eb2c94 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -408,8 +408,6 @@ func (c *FederateController) reconcile(ctx context.Context, key workerKey) (stat "Federated object updated: %s", fedObject.GetName(), ) - } else { - logger.V(3).Info("No updates required to the federated object") } return worker.StatusAllOK diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index 0c4a2645..cf43b8f2 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -165,7 +165,8 @@ func NewScheduler( s.enqueueFederatedObjectsForCluster, )) - s.webhookConfigurationSynced = webhookConfigurationInformer.Informer().HasSynced + schedulingProfileInformer.Informer() + webhookConfigurationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { s.cacheWebhookPlugin(obj.(*fedcorev1a1.SchedulerPluginWebhookConfiguration)) @@ -190,6 +191,7 @@ func NewScheduler( s.webhookPlugins.Delete(obj.(*fedcorev1a1.SchedulerPluginWebhookConfiguration).Name) }, }) + s.webhookConfigurationSynced = webhookConfigurationInformer.Informer().HasSynced informerManager.AddFTCUpdateHandler(func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) { if lastObserved == nil && latest != nil { @@ -603,7 +605,7 @@ func (s *Scheduler) persistSchedulingResult( return worker.StatusError } - logger.V(1).Info("Updated federated object") + logger.V(1).Info("Scheduling success") s.eventRecorder.Eventf( fedObject, corev1.EventTypeNormal, diff --git a/pkg/controllers/scheduler/schedulingtriggers.go b/pkg/controllers/scheduler/schedulingtriggers.go index 118a624d..db00812b 100644 --- a/pkg/controllers/scheduler/schedulingtriggers.go +++ b/pkg/controllers/scheduler/schedulingtriggers.go @@ -156,13 +156,13 @@ var knownSchedulingAnnotations = sets.New( ) func getSchedulingAnnotations(fedObject fedcorev1a1.GenericFederatedObject) []keyValue[string, string] { - annotations := fedObject.GetAnnotations() // this is a deep copy - for k := range annotations { - if !knownSchedulingAnnotations.Has(k) { - delete(annotations, k) + result := map[string]string{} + for k, v := range fedObject.GetAnnotations() { + if knownSchedulingAnnotations.Has(k) { + result[k] = v } } - return sortMap(annotations) + return sortMap(result) } func getReplicaCount( From adf4a0792cb1f68227d5c64d4d81b5fa690cf93f Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Thu, 27 Jul 2023 17:00:33 +0800 Subject: [PATCH 095/173] feat: add resource gvk info in federatedobject label --- pkg/controllers/federate/util.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 96802afd..47bb24db 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -98,6 +98,7 @@ func newFederatedObjectForSourceObject( // the FederatedObject's template. 
federatedLabels, templateLabels := classifyLabels(sourceObj.GetLabels()) + federatedLabels[sourceObj.GetAPIVersion()] = sourceObj.GetKind() // Classify annotations into annotations that should be copied onto the FederatedObject and labels that should be // copied onto the FederatedObject's template. @@ -163,6 +164,7 @@ func updateFederatedObjectForSourceObject( // the FederatedObject's template and update the FederatedObject's template. federatedLabels, templateLabels := classifyLabels(sourceObject.GetLabels()) + federatedLabels[sourceObject.GetAPIVersion()] = sourceObject.GetKind() if !equality.Semantic.DeepEqual(federatedLabels, fedObject.GetLabels()) { fedObject.SetLabels(federatedLabels) isUpdated = true From 937a2bd0ab991dd9108cf6f60a9aeb1cb448a01a Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 13:06:05 +0800 Subject: [PATCH 096/173] refactor(eventhandlers): refactor event handlers --- pkg/controllers/federate/controller.go | 13 +--- .../federatedcluster/controller.go | 5 +- pkg/controllers/nsautoprop/controller.go | 3 - pkg/controllers/policyrc/controller.go | 32 ++++----- pkg/controllers/scheduler/scheduler.go | 21 +++--- pkg/controllers/status/controller.go | 19 ++--- pkg/util/eventhandlers/eventhandler.go | 72 ++++++++++++------- 7 files changed, 82 insertions(+), 83 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index 63eb2c94..b965ea92 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -130,14 +130,13 @@ func NewFederateController( return uns.GetNamespace() != fedSystemNamespace }, Handler: eventhandlers.NewTriggerOnAllChanges( - func(uns *unstructured.Unstructured) workerKey { - return workerKey{ + func(uns *unstructured.Unstructured) { + c.worker.Enqueue(workerKey{ name: uns.GetName(), namespace: uns.GetNamespace(), gvk: ftc.GetSourceTypeGVK(), - } + }) }, - c.worker.Enqueue, ), } }, @@ -154,9 +153,6 @@ func NewFederateController( return fedObj.Namespace != fedSystemNamespace }, Handler: eventhandlers.NewTriggerOnAllChanges( - func(fedObj *fedcorev1a1.FederatedObject) *fedcorev1a1.FederatedObject { - return fedObj - }, func(fedObj *fedcorev1a1.FederatedObject) { srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured() if err != nil { @@ -183,9 +179,6 @@ func NewFederateController( if _, err := clusterFedObjectInformer.Informer().AddEventHandler( eventhandlers.NewTriggerOnAllChanges( - func(fedObj *fedcorev1a1.ClusterFederatedObject) *fedcorev1a1.ClusterFederatedObject { - return fedObj - }, func(fedObj *fedcorev1a1.ClusterFederatedObject) { srcMeta, err := fedObj.Spec.GetTemplateAsUnstructured() if err != nil { diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index 9be0041d..78abd27b 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -155,8 +155,9 @@ func NewFederatedClusterController( } return false }, - common.NewQualifiedName, - c.worker.Enqueue, + func(cluster metav1.Object) { + c.worker.Enqueue(common.NewQualifiedName(cluster)) + }, )) return c, nil diff --git a/pkg/controllers/nsautoprop/controller.go b/pkg/controllers/nsautoprop/controller.go index ed138372..cc929855 100644 --- a/pkg/controllers/nsautoprop/controller.go +++ b/pkg/controllers/nsautoprop/controller.go @@ -133,9 +133,6 @@ func NewNamespaceAutoPropagationController( if _, err := c.clusterFedObjectInformer.Informer().AddEventHandlerWithResyncPeriod( 
eventhandlers.NewTriggerOnAllChanges( - func(obj *fedcorev1a1.ClusterFederatedObject) *fedcorev1a1.ClusterFederatedObject { - return obj - }, func(obj *fedcorev1a1.ClusterFederatedObject) { srcMeta, err := obj.Spec.GetTemplateAsUnstructured() if err != nil { diff --git a/pkg/controllers/policyrc/controller.go b/pkg/controllers/policyrc/controller.go index 869ef57e..04571969 100644 --- a/pkg/controllers/policyrc/controller.go +++ b/pkg/controllers/policyrc/controller.go @@ -82,14 +82,14 @@ func NewPolicyRCController( logger: logger.WithValues("controller", ControllerName), } - if _, err := c.fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + if _, err := c.fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChangesWithTransform( common.NewQualifiedName, c.countWorker.Enqueue, )); err != nil { return nil, err } - if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChangesWithTransform( common.NewQualifiedName, c.countWorker.Enqueue, )); err != nil { @@ -141,31 +141,27 @@ func NewPolicyRCController( metrics, ) - if _, err := c.propagationPolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( - common.NewQualifiedName, - c.persistPpWorker.Enqueue, - )); err != nil { + if _, err := c.propagationPolicyInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnAllChangesWithTransform(common.NewQualifiedName, c.persistPpWorker.Enqueue), + ); err != nil { return nil, err } - if _, err := c.clusterPropagationPolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( - common.NewQualifiedName, - c.persistPpWorker.Enqueue, - )); err != nil { + if _, err := c.clusterPropagationPolicyInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnAllChangesWithTransform(common.NewQualifiedName, c.persistPpWorker.Enqueue), + ); err != nil { return nil, err } - if _, err := c.overridePolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( - common.NewQualifiedName, - c.persistOpWorker.Enqueue, - )); err != nil { + if _, err := c.overridePolicyInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnAllChangesWithTransform(common.NewQualifiedName, c.persistOpWorker.Enqueue), + ); err != nil { return nil, err } - if _, err := c.clusterOverridePolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( - common.NewQualifiedName, - c.persistPpWorker.Enqueue, - )); err != nil { + if _, err := c.clusterOverridePolicyInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnAllChangesWithTransform(common.NewQualifiedName, c.persistPpWorker.Enqueue), + ); err != nil { return nil, err } diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index cf43b8f2..b7076747 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -135,23 +135,21 @@ func NewScheduler( metrics, ) - fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChangesWithTransform( common.NewQualifiedName, s.worker.Enqueue, )) - clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( + clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChangesWithTransform( common.NewQualifiedName, 
s.worker.Enqueue, )) - propagationPolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnGenerationChanges( - func(obj metav1.Object) metav1.Object { return obj }, - s.enqueueFederatedObjectsForPolicy, - )) - clusterPropagationPolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnGenerationChanges( - func(obj metav1.Object) metav1.Object { return obj }, - s.enqueueFederatedObjectsForPolicy, - )) + propagationPolicyInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnGenerationChanges(s.enqueueFederatedObjectsForPolicy), + ) + clusterPropagationPolicyInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnGenerationChanges(s.enqueueFederatedObjectsForPolicy), + ) federatedClusterInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnChanges( func(oldCluster, curCluster *fedcorev1a1.FederatedCluster) bool { @@ -159,9 +157,6 @@ func NewScheduler( !equality.Semantic.DeepEqual(oldCluster.Spec.Taints, curCluster.Spec.Taints) || !equality.Semantic.DeepEqual(oldCluster.Status.APIResourceTypes, curCluster.Status.APIResourceTypes) }, - func(cluster *fedcorev1a1.FederatedCluster) *fedcorev1a1.FederatedCluster { - return cluster - }, s.enqueueFederatedObjectsForCluster, )) diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index 15959145..6f3a7838 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -152,7 +152,7 @@ func NewStatusController( // Build queue for triggering cluster reconciliations. s.clusterQueue = workqueue.NewNamedDelayingQueue("status-controller-cluster-queue") - fedObjectHandler := eventhandlers.NewTriggerOnAllChanges( + fedObjectHandler := eventhandlers.NewTriggerOnAllChangesWithTransform( common.NewQualifiedName, func(key common.QualifiedName) { s.enqueueEnableCollectedStatusObject(key, 0) @@ -167,17 +167,15 @@ func NewStatusController( return nil, err } - if _, err := s.collectedStatusInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( - common.NewQualifiedName, - s.worker.Enqueue, - )); err != nil { + if _, err := s.collectedStatusInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnAllChangesWithTransform(common.NewQualifiedName, s.worker.Enqueue), + ); err != nil { return nil, err } - if _, err := s.clusterCollectedStatusInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges( - common.NewQualifiedName, - s.worker.Enqueue, - )); err != nil { + if _, err := s.clusterCollectedStatusInformer.Informer().AddEventHandler( + eventhandlers.NewTriggerOnAllChangesWithTransform(common.NewQualifiedName, s.worker.Enqueue), + ); err != nil { return nil, err } @@ -194,9 +192,6 @@ func NewStatusController( } return eventhandlers.NewTriggerOnAllChanges( - func(uns *unstructured.Unstructured) *unstructured.Unstructured { - return uns - }, func(uns *unstructured.Unstructured) { ftc, exists := s.ftcManager.GetResourceFTC(uns.GroupVersionKind()) if !exists { diff --git a/pkg/util/eventhandlers/eventhandler.go b/pkg/util/eventhandlers/eventhandler.go index f8e5046a..d02dda13 100644 --- a/pkg/util/eventhandlers/eventhandler.go +++ b/pkg/util/eventhandlers/eventhandler.go @@ -24,11 +24,8 @@ import ( ) // NewTriggerOnAllChanges returns a cache.ResourceEventHandlerFuncs that will call the given triggerFunc on all object -// changes. The object is first transformed with the given keyFunc. triggerFunc is also called for add or delete events. 
-func NewTriggerOnAllChanges[Source any, Key any](
-	keyFunc func(Source) Key,
-	triggerFunc func(Key),
-) *cache.ResourceEventHandlerFuncs {
+// changes. triggerFunc is also called for add or delete events.
+func NewTriggerOnAllChanges[Source any](triggerFunc func(Source)) *cache.ResourceEventHandlerFuncs {
 	return &cache.ResourceEventHandlerFuncs{
 		DeleteFunc: func(old interface{}) {
 			if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok {
@@ -38,28 +35,57 @@ func NewTriggerOnAllChanges[Source any, Key any](
 				}
 			}
 			oldSource := old.(Source)
-			triggerFunc(keyFunc(oldSource))
+			triggerFunc(oldSource)
 		},
 		AddFunc: func(cur interface{}) {
 			curObj := cur.(Source)
-			triggerFunc(keyFunc(curObj))
+			triggerFunc(curObj)
 		},
 		UpdateFunc: func(old, cur interface{}) {
 			if !reflect.DeepEqual(old, cur) {
 				curObj := cur.(Source)
-				triggerFunc(keyFunc(curObj))
+				triggerFunc(curObj)
+			}
+		},
+	}
+}
+
+// NewTriggerOnAllChangesWithTransform returns a cache.ResourceEventHandler that will call the given triggerFunc on all
+// object changes. triggerFunc is also called for add or delete events. transformFunc will first be used to transform
+// the original object into the target type.
+func NewTriggerOnAllChangesWithTransform[Source any, Target any](
+	transformFunc func(Source) Target,
+	triggerFunc func(Target),
+) cache.ResourceEventHandler {
+	return &cache.ResourceEventHandlerFuncs{
+		DeleteFunc: func(old interface{}) {
+			if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok {
+				old = deleted.Obj
+				if old == nil {
+					return
+				}
+			}
+			oldSource := transformFunc(old.(Source))
+			triggerFunc(oldSource)
+		},
+		AddFunc: func(cur interface{}) {
+			curObj := transformFunc(cur.(Source))
+			triggerFunc(curObj)
+		},
+		UpdateFunc: func(old, cur interface{}) {
+			if !reflect.DeepEqual(old, cur) {
+				curObj := transformFunc(cur.(Source))
+				triggerFunc(curObj)
 			}
 		},
 	}
 }
 
 // NewTriggerOnChanges returns a cache.ResourceEventHandlerFuncs that will call the given triggerFunc on object changes
-// that passes the given predicate. The object is first transformed with the given keyFunc. triggerFunc is also called
-// for add and delete events.
-func NewTriggerOnChanges[Source any, Key any](
+// that pass the given predicate. triggerFunc is also called for add and delete events.
+func NewTriggerOnChanges[Source any](
 	predicate func(old, cur Source) bool,
-	keyFunc func(Source) Key,
-	triggerFunc func(Key),
+	triggerFunc func(Source),
 ) *cache.ResourceEventHandlerFuncs {
 	return &cache.ResourceEventHandlerFuncs{
 		DeleteFunc: func(old interface{}) {
@@ -71,29 +97,25 @@ func NewTriggerOnChanges[Source any, Key any](
 			}
 			oldObj := old.(Source)
-			triggerFunc(keyFunc(oldObj))
+			triggerFunc(oldObj)
 		},
 		AddFunc: func(cur interface{}) {
 			curObj := cur.(Source)
-			triggerFunc(keyFunc(curObj))
+			triggerFunc(curObj)
 		},
 		UpdateFunc: func(old, cur interface{}) {
 			oldObj := old.(Source)
 			curObj := cur.(Source)
 
 			if predicate(oldObj, curObj) {
-				triggerFunc(keyFunc(curObj))
+				triggerFunc(curObj)
 			}
 		},
 	}
 }
 
 // NewTriggerOnGenerationChanges returns a cache.ResourceEventHandlerFuncs that will call the given triggerFunc on
-// object generation changes. The object is first transformed with the given keyFunc. triggerFunc is also called for add
-// and delete events.
-func NewTriggerOnGenerationChanges[Source any, Key any](
-	keyFunc func(Source) Key,
-	triggerFunc func(Key),
-) *cache.ResourceEventHandlerFuncs {
+// object generation changes. triggerFunc is also called for add and delete events.
+func NewTriggerOnGenerationChanges[Source any](triggerFunc func(Source)) *cache.ResourceEventHandlerFuncs { return &cache.ResourceEventHandlerFuncs{ DeleteFunc: func(old interface{}) { if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok { @@ -104,18 +126,18 @@ func NewTriggerOnGenerationChanges[Source any, Key any]( } } oldObj := old.(Source) - triggerFunc(keyFunc(oldObj)) + triggerFunc(oldObj) }, AddFunc: func(cur interface{}) { curObj := cur.(Source) - triggerFunc(keyFunc(curObj)) + triggerFunc(curObj) }, UpdateFunc: func(old, cur interface{}) { oldObj := old.(metav1.Object) curObj := cur.(metav1.Object) if oldObj.GetGeneration() != curObj.GetGeneration() { - triggerFunc(keyFunc(cur.(Source))) + triggerFunc(cur.(Source)) } }, } From 94ca742c01e201d04bb9bb9572803c0740eb6621 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 13:09:49 +0800 Subject: [PATCH 097/173] refactor(core): change override controller name --- cmd/controller-manager/app/controllermanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index a23c4fbc..fdb5f2ef 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -39,7 +39,7 @@ const ( FederateControllerName = "federate" FollowerControllerName = "follower" PolicyRCControllerName = "policyrc" - OverrideControllerName = "overridepolicy" + OverrideControllerName = "override" NamespaceAutoPropagationControllerName = "nsautoprop" StatusControllerName = "status" SchedulerName = "scheduler" From 338ce5f56b774c1dabb3f18ca18430833ad8bd55 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 13:28:25 +0800 Subject: [PATCH 098/173] feat(override): deprecate override util package --- .../override/overridepolicy_controller.go | 11 +- pkg/controllers/override/util.go | 37 +++- pkg/controllers/override/util_test.go | 1 - pkg/controllers/util/overrides.go | 203 ------------------ 4 files changed, 35 insertions(+), 217 deletions(-) delete mode 100644 pkg/controllers/util/overrides.go diff --git a/pkg/controllers/override/overridepolicy_controller.go b/pkg/controllers/override/overridepolicy_controller.go index 3d870150..0ab9db33 100644 --- a/pkg/controllers/override/overridepolicy_controller.go +++ b/pkg/controllers/override/overridepolicy_controller.go @@ -339,7 +339,7 @@ func (c *Controller) reconcile(ctx context.Context, qualifiedName common.Qualifi return worker.StatusError } - var overrides util.OverridesMap + var overrides overridesMap // Apply overrides from each policy in order for _, policy := range policies { newOverrides, err := parseOverrides(policy, placedClusters) @@ -358,16 +358,11 @@ func (c *Controller) reconcile(ctx context.Context, qualifiedName common.Qualifi overrides = mergeOverrides(overrides, newOverrides) } - currentOverrides, err := util.GetOverrides(fedObject, PrefixedControllerName) - if err != nil { - keyedLogger.Error(err, "Failed to get overrides") - return worker.StatusError - } - + currentOverrides := fedObject.GetSpec().GetControllerOverrides(PrefixedControllerName) needsUpdate := !equality.Semantic.DeepEqual(overrides, currentOverrides) if needsUpdate { - err = util.SetOverrides(fedObject, PrefixedControllerName, overrides) + err = setOverrides(fedObject, overrides) if err != nil { keyedLogger.Error(err, "Failed to set overrides") return worker.StatusError diff --git a/pkg/controllers/override/util.go b/pkg/controllers/override/util.go 
index 3652f153..519d41f7 100644 --- a/pkg/controllers/override/util.go +++ b/pkg/controllers/override/util.go @@ -26,10 +26,11 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/util/clusterselector" ) +type overridesMap map[string]fedcorev1a1.OverridePatches + /* lookForMatchedPolicies looks for OverridePolicy and/or ClusterOverridePolicy that match the obj in the stores. @@ -90,8 +91,8 @@ func lookForMatchedPolicies( func parseOverrides( policy fedcorev1a1.GenericOverridePolicy, clusters []*fedcorev1a1.FederatedCluster, -) (util.OverridesMap, error) { - overridesMap := make(util.OverridesMap) +) (overridesMap, error) { + overridesMap := make(overridesMap) for _, cluster := range clusters { patches := make(fedcorev1a1.OverridePatches, 0) @@ -130,9 +131,9 @@ func parseOverrides( return overridesMap, nil } -func mergeOverrides(dest, src util.OverridesMap) util.OverridesMap { +func mergeOverrides(dest, src overridesMap) overridesMap { if dest == nil { - dest = make(util.OverridesMap) + dest = make(overridesMap) } for clusterName, srcOverrides := range src { @@ -229,3 +230,29 @@ func policyJsonPatchOverriderToOverridePatch( return overridePatch, nil } + +func setOverrides(federatedObj fedcorev1a1.GenericFederatedObject, overridesMap overridesMap) error { + for clusterName, clusterOverrides := range overridesMap { + if len(clusterOverrides) == 0 { + delete(overridesMap, clusterName) + } + } + + if len(overridesMap) == 0 { + federatedObj.GetSpec().DeleteControllerOverrides(PrefixedControllerName) + return nil + } + + overrides := []fedcorev1a1.ClusterReferenceWithPatches{} + + for clusterName, clusterOverrides := range overridesMap { + overrides = append(overrides, fedcorev1a1.ClusterReferenceWithPatches{ + Cluster: clusterName, + Patches: clusterOverrides, + }) + } + + federatedObj.GetSpec().SetControllerOverrides(PrefixedControllerName, overrides) + + return nil +} diff --git a/pkg/controllers/override/util_test.go b/pkg/controllers/override/util_test.go index 75d836b4..d2d6f4d6 100644 --- a/pkg/controllers/override/util_test.go +++ b/pkg/controllers/override/util_test.go @@ -27,7 +27,6 @@ import ( "k8s.io/client-go/tools/cache" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" ) diff --git a/pkg/controllers/util/overrides.go b/pkg/controllers/util/overrides.go deleted file mode 100644 index 76a2cbc4..00000000 --- a/pkg/controllers/util/overrides.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -This file may have been modified by The KubeAdmiral Authors -("KubeAdmiral Modifications"). All KubeAdmiral Modifications -are Copyright 2023 The KubeAdmiral Authors. 
-*/ - -package util - -import ( - "encoding/json" - "sort" - - jsonpatch "github.com/evanphx/json-patch" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/sets" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" -) - -// Namespace and name may not be overridden since these fields are the -// primary mechanism of association between a federated resource in -// the host cluster and the target resources in the member clusters. -// -// Kind should always be sourced from the FTC and not vary across -// member clusters. -// -// apiVersion can be overridden to support managing resources like -// Ingress which can exist in different groups at different -// versions. Users will need to take care not to abuse this -// capability. -var invalidPaths = sets.NewString( - "/metadata/namespace", - "/metadata/name", - "/metadata/generateName", - "/kind", -) - -// Mapping of clusterName to overrides for the cluster -type OverridesMap map[string]fedcorev1a1.OverridePatches - -// GetOverrides returns a map of overrides populated from the given -// unstructured object. -func GetOverrides(federatedObj fedcorev1a1.GenericFederatedObject, controller string) (OverridesMap, error) { - overridesMap := make(OverridesMap) - - if federatedObj == nil || federatedObj.GetSpec().Overrides == nil { - // No overrides defined for the federated type - return overridesMap, nil - } - - overrides := federatedObj.GetSpec().Overrides - var clusterOverrides []fedcorev1a1.ClusterReferenceWithPatches - for i := range overrides { - if overrides[i].Controller == controller { - clusterOverrides = overrides[i].Override - break - } - } - - if clusterOverrides == nil { - return overridesMap, nil - } - - for _, overrideItem := range clusterOverrides { - clusterName := overrideItem.Cluster - if _, ok := overridesMap[clusterName]; ok { - return nil, errors.Errorf("cluster %q appears more than once", clusterName) - } - - for i, pathEntry := range overrideItem.Patches { - path := pathEntry.Path - if invalidPaths.Has(path) { - return nil, errors.Errorf("override[%d] for cluster %q has an invalid path: %s", i, clusterName, path) - } - } - overridesMap[clusterName] = overrideItem.Patches - } - - return overridesMap, nil -} - -// SetOverrides sets the spec.overrides field of the unstructured -// object from the provided overrides map. -// -// This function takes ownership of the `overridesMap` and may mutate it arbitrarily. -func SetOverrides(federatedObj fedcorev1a1.GenericFederatedObject, controller string, overridesMap OverridesMap) error { - for clusterName, clusterOverrides := range overridesMap { - if len(clusterOverrides) == 0 { - delete(overridesMap, clusterName) - } - } - - index := -1 - for i, overrides := range federatedObj.GetSpec().Overrides { - if overrides.Controller == controller { - index = i - break - } - } - - if len(overridesMap) == 0 { - // delete index - if index != -1 { - federatedObj.GetSpec().Overrides = append(federatedObj.GetSpec().Overrides[:index], federatedObj.GetSpec().Overrides[(index+1):]...) 
- } - } else { - if index == -1 { - index = len(federatedObj.GetSpec().Overrides) - federatedObj.GetSpec().Overrides = append(federatedObj.GetSpec().Overrides, fedcorev1a1.OverrideWithController{ - Controller: controller, - }) - } - - overrides := &federatedObj.GetSpec().Overrides[index] - overrides.Override = nil - - // Write in ascending order of cluster names for better readability - clusterNames := make([]string, 0, len(overridesMap)) - for clusterName := range overridesMap { - clusterNames = append(clusterNames, clusterName) - } - sort.Strings(clusterNames) - for _, clusterName := range clusterNames { - clusterOverrides := overridesMap[clusterName] - overrides.Override = append(overrides.Override, fedcorev1a1.ClusterReferenceWithPatches{ - Cluster: clusterName, - Patches: clusterOverrides, - }) - } - } - - return nil -} - -// UnstructuredToInterface converts an unstructured object to the -// provided interface by json marshalling/unmarshalling. -func UnstructuredToInterface(rawObj *unstructured.Unstructured, obj interface{}) error { - content, err := rawObj.MarshalJSON() - if err != nil { - return err - } - return json.Unmarshal(content, obj) -} - -// InterfaceToUnstructured converts the provided object to an -// unstructured by json marshalling/unmarshalling. -func InterfaceToUnstructured(obj interface{}) (ret interface{}, err error) { - var buf []byte - buf, err = json.Marshal(obj) - if err != nil { - return - } - - err = json.Unmarshal(buf, &ret) - return -} - -// ApplyJsonPatch applies the override on to the given unstructured object. -func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedcorev1a1.OverridePatches) error { - // TODO: Do the defaulting of "op" field to "replace" in API defaulting - for i, overrideItem := range overrides { - if overrideItem.Op == "" { - overrides[i].Op = "replace" - } - } - jsonPatchBytes, err := json.Marshal(overrides) - if err != nil { - return err - } - - patch, err := jsonpatch.DecodePatch(jsonPatchBytes) - if err != nil { - return err - } - - ObjectJSONBytes, err := obj.MarshalJSON() - if err != nil { - return err - } - - patchedObjectJSONBytes, err := patch.Apply(ObjectJSONBytes) - if err != nil { - return err - } - - err = obj.UnmarshalJSON(patchedObjectJSONBytes) - return err -} From d4038e83310dbacf263c5e4f84e678466ef4a540 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 13:45:56 +0800 Subject: [PATCH 099/173] fix(policyrc): fix imports --- pkg/controllers/common/constants.go | 3 --- pkg/controllers/federate/util.go | 4 ++-- pkg/controllers/policyrc/controller.go | 3 ++- pkg/controllers/scheduler/scheduler.go | 4 +++- pkg/controllers/scheduler/util.go | 4 ++-- pkg/util/fedobjectadapters/adapters.go | 14 -------------- 6 files changed, 9 insertions(+), 23 deletions(-) diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index 41d9c783..810817e5 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -140,9 +140,6 @@ const ( // TemplateGeneratorMergePatchAnnotation indicates the merge patch document capable of converting // the source object to the template object. 
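// As an illustration only (not code from this patch set), a merge patch of
// the kind this annotation stores can be produced and re-applied with the
// evanphx/json-patch library already used in this repository; the JSON
// documents below are stand-ins:
//
//	source := []byte(`{"metadata":{"name":"demo","resourceVersion":"42"},"spec":{"replicas":1}}`)
//	template := []byte(`{"metadata":{"name":"demo"},"spec":{"replicas":1}}`)
//
//	// RFC 7386 merge patch converting source into template,
//	// here {"metadata":{"resourceVersion":null}}.
//	patch, _ := jsonpatch.CreateMergePatch(source, template)
//
//	// Applying the patch back to the source recovers the template.
//	restored, _ := jsonpatch.MergePatch(source, patch)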
TemplateGeneratorMergePatchAnnotation = FederateControllerPrefix + "template-generator-merge-patch" - - PropagationPolicyNameLabel = DefaultPrefix + "propagation-policy-name" - ClusterPropagationPolicyNameLabel = DefaultPrefix + "cluster-propagation-policy-name" ) // PropagatedAnnotationKeys and PropagatedLabelKeys are used to store the keys of annotations and labels that are present diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 104831de..96802afd 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -270,8 +270,8 @@ var ( ) federatedLabelSet = sets.New[string]( - common.PropagationPolicyNameLabel, - common.ClusterPropagationPolicyNameLabel, + scheduler.PropagationPolicyNameLabel, + scheduler.ClusterPropagationPolicyNameLabel, override.OverridePolicyNameLabel, override.ClusterOverridePolicyNameLabel, ) diff --git a/pkg/controllers/policyrc/controller.go b/pkg/controllers/policyrc/controller.go index 04571969..2041af21 100644 --- a/pkg/controllers/policyrc/controller.go +++ b/pkg/controllers/policyrc/controller.go @@ -32,6 +32,7 @@ import ( fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" + "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" "github.com/kubewharf/kubeadmiral/pkg/stats" "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" @@ -236,7 +237,7 @@ func (c *Controller) reconcileCount(ctx context.Context, qualifiedName common.Qu var newPps []PolicyKey if fedObj != nil { - newPolicy, newHasPolicy := fedobjectadapters.MatchedPolicyKey(fedObj, fedObj.GetNamespace() != "") + newPolicy, newHasPolicy := scheduler.GetMatchedPolicyKey(fedObj) if newHasPolicy { newPps = []PolicyKey{PolicyKey(newPolicy)} } diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index b7076747..b20fda85 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -56,7 +56,9 @@ import ( ) const ( - SchedulerName = "scheduler" + SchedulerName = "scheduler" + PropagationPolicyNameLabel = common.DefaultPrefix + "propagation-policy-name" + ClusterPropagationPolicyNameLabel = common.DefaultPrefix + "cluster-propagation-policy-name" ) type ClusterWeight struct { diff --git a/pkg/controllers/scheduler/util.go b/pkg/controllers/scheduler/util.go index 0bd63424..dd82630d 100644 --- a/pkg/controllers/scheduler/util.go +++ b/pkg/controllers/scheduler/util.go @@ -36,11 +36,11 @@ func GetMatchedPolicyKey(obj metav1.Object) (result common.QualifiedName, ok boo labels := obj.GetLabels() isNamespaced := len(obj.GetNamespace()) > 0 - if policyName, exists := labels[common.PropagationPolicyNameLabel]; exists && isNamespaced { + if policyName, exists := labels[PropagationPolicyNameLabel]; exists && isNamespaced { return common.QualifiedName{Namespace: obj.GetNamespace(), Name: policyName}, true } - if policyName, exists := labels[common.ClusterPropagationPolicyNameLabel]; exists { + if policyName, exists := labels[ClusterPropagationPolicyNameLabel]; exists { return common.QualifiedName{Namespace: "", Name: policyName}, true } diff --git a/pkg/util/fedobjectadapters/adapters.go b/pkg/util/fedobjectadapters/adapters.go index d4c7c945..b30f52ff 100644 --- a/pkg/util/fedobjectadapters/adapters.go +++ 
b/pkg/util/fedobjectadapters/adapters.go @@ -9,7 +9,6 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) func ensureNilInterface( @@ -123,16 +122,3 @@ func Delete( } } -func MatchedPolicyKey(obj fedcorev1a1.GenericFederatedObject, isNamespaced bool) (result common.QualifiedName, ok bool) { - labels := obj.GetLabels() - - if policyName, exists := labels[common.PropagationPolicyNameLabel]; exists && isNamespaced { - return common.QualifiedName{Namespace: obj.GetNamespace(), Name: policyName}, true - } - - if policyName, exists := labels[common.ClusterPropagationPolicyNameLabel]; exists { - return common.QualifiedName{Namespace: "", Name: policyName}, true - } - - return common.QualifiedName{}, false -} From ed50baf8c7769b9577dc1eb2a667c92c6372988c Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 14:07:46 +0800 Subject: [PATCH 100/173] fix(cluster-controller): status update on cache not synced --- .../federatedcluster/clusterstatus.go | 28 +++++++++++-------- test/e2e/federatedcluster/clusterstatus.go | 20 ++++++++++--- 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/pkg/controllers/federatedcluster/clusterstatus.go b/pkg/controllers/federatedcluster/clusterstatus.go index 80c6fa8f..94608bf8 100644 --- a/pkg/controllers/federatedcluster/clusterstatus.go +++ b/pkg/controllers/federatedcluster/clusterstatus.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/discovery" corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" @@ -68,24 +69,14 @@ func (c *FederatedClusterController) collectIndividualClusterStatus( if !exists { return 0, fmt.Errorf("failed to get cluster client: FederatedInformerManager not yet up-to-date") } - podLister, podsSynced, exists := c.federatedInformerManager.GetPodLister(cluster.Name) if !exists { return 0, fmt.Errorf("failed to get pod lister: FederatedInformerManager not yet up-to-date") } - if !podsSynced() { - logger.V(3).Info("Pod informer not synced, will reenqueue") - return 100 * time.Millisecond, nil - } - nodeLister, nodesSynced, exists := c.federatedInformerManager.GetNodeLister(cluster.Name) if !exists { return 0, fmt.Errorf("failed to get node lister: FederatedInformerManager not yet up-to-date") } - if !nodesSynced() { - logger.V(3).Info("Pod informer not synced, will reenqueue") - return 100 * time.Millisecond, nil - } discoveryClient := clusterKubeClient.Discovery() @@ -108,7 +99,14 @@ func (c *FederatedClusterController) collectIndividualClusterStatus( // We skip updating cluster resources and api resources if cluster is not ready if readyStatus == corev1.ConditionTrue { - if err := updateClusterResources(ctx, &cluster.Status, podLister, nodeLister); err != nil { + if err := updateClusterResources( + ctx, + &cluster.Status, + podLister, + podsSynced, + nodeLister, + nodesSynced, + ); err != nil { logger.Error(err, "Failed to update cluster resources") readyStatus = corev1.ConditionFalse readyReason = ClusterResourceCollectionFailedReason @@ -174,8 +172,16 @@ func updateClusterResources( ctx context.Context, clusterStatus *fedcorev1a1.FederatedClusterStatus, podLister corev1listers.PodLister, + podsSynced cache.InformerSynced, nodeLister 
corev1listers.NodeLister, + nodesSynced cache.InformerSynced, ) error { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + if !cache.WaitForCacheSync(ctx.Done(), podsSynced, nodesSynced) { + return fmt.Errorf("timeout waiting for node and pod informer sync") + } + nodes, err := nodeLister.List(labels.Everything()) if err != nil { return fmt.Errorf("failed to list nodes: %w", err) diff --git a/test/e2e/federatedcluster/clusterstatus.go b/test/e2e/federatedcluster/clusterstatus.go index 98ceedc3..e74dc7e3 100644 --- a/test/e2e/federatedcluster/clusterstatus.go +++ b/test/e2e/federatedcluster/clusterstatus.go @@ -39,7 +39,10 @@ var _ = ginkgo.Describe("Cluster Status", federatedClusterTestLabels, func() { waitForClusterJoin := func(ctx context.Context) { gomega.Eventually(func(g gomega.Gomega, ctx context.Context) { - cluster, err := f.HostFedClient().CoreV1alpha1().FederatedClusters().Get(ctx, cluster.Name, metav1.GetOptions{}) + cluster, err := f.HostFedClient(). + CoreV1alpha1(). + FederatedClusters(). + Get(ctx, cluster.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred(), framework.MessageUnexpectedError) g.Expect(cluster).To(gomega.Satisfy(clusterfwk.ClusterJoined)) }).WithTimeout(clusterJoinTimeout).WithContext(ctx).Should(gomega.Succeed(), "Timed out waiting for cluster join") @@ -48,7 +51,10 @@ var _ = ginkgo.Describe("Cluster Status", federatedClusterTestLabels, func() { waitForFirstStatusUpdate := func(ctx context.Context) { // check initial status update gomega.Eventually(func(g gomega.Gomega, ctx context.Context) { - cluster, err := f.HostFedClient().CoreV1alpha1().FederatedClusters().Get(ctx, cluster.Name, metav1.GetOptions{}) + cluster, err := f.HostFedClient(). + CoreV1alpha1(). + FederatedClusters(). + Get(ctx, cluster.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred(), framework.MessageUnexpectedError) g.Expect(cluster).To(gomega.Satisfy(statusCollected)) }).WithTimeout(clusterStatusCollectTimeout).WithContext(ctx).Should(gomega.Succeed(), "Timed out waiting for first status update") @@ -58,7 +64,10 @@ var _ = ginkgo.Describe("Cluster Status", federatedClusterTestLabels, func() { waitForStatusConvergence := func(ctx context.Context) { gomega.Eventually(func(g gomega.Gomega, ctx context.Context) { var err error - cluster, err := f.HostFedClient().CoreV1alpha1().FederatedClusters().Get(ctx, cluster.Name, metav1.GetOptions{}) + cluster, err := f.HostFedClient(). + CoreV1alpha1(). + FederatedClusters(). + Get(ctx, cluster.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred(), framework.MessageUnexpectedError) g.Expect(cluster).To(gomega.Satisfy(clusterfwk.ClusterReachable)) g.Expect(cluster.Status.APIResourceTypes).ToNot(gomega.BeEmpty()) @@ -118,7 +127,10 @@ var _ = ginkgo.Describe("Cluster Status", federatedClusterTestLabels, func() { waitForStatusConvergence := func(ctx context.Context) { gomega.Eventually(func(g gomega.Gomega, ctx context.Context) { var err error - cluster, err := f.HostFedClient().CoreV1alpha1().FederatedClusters().Get(ctx, cluster.Name, metav1.GetOptions{}) + cluster, err := f.HostFedClient(). + CoreV1alpha1(). + FederatedClusters(). 
+ Get(ctx, cluster.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred(), framework.MessageUnexpectedError) g.Expect(cluster).To(gomega.Satisfy(clusterfwk.ClusterUnreachable)) }).WithTimeout(clusterStatusUpdateInterval*2).WithContext(ctx).Should(gomega.Succeed(), "Timed out waiting for status convergence") From cfcaea9cc0877b924bc0f61c95493deac6a68d7e Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 14:20:12 +0800 Subject: [PATCH 101/173] fix(cluster-controller): cluster controller synced --- pkg/controllers/federatedcluster/controller.go | 6 +++--- pkg/util/informermanager/federatedinformermanager.go | 7 +++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index 78abd27b..b491c0d0 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -96,14 +96,14 @@ func NewFederatedClusterController( logger klog.Logger, clusterJoinTimeout time.Duration, workerCount int, - fedsystemNamespace string, + fedSystemNamespace string, ) (*FederatedClusterController, error) { c := &FederatedClusterController{ clusterInformer: clusterInformer, federatedInformerManager: federatedInformerManager, kubeClient: kubeClient, fedClient: fedClient, - fedSystemNamespace: fedsystemNamespace, + fedSystemNamespace: fedSystemNamespace, clusterHealthCheckConfig: &ClusterHealthCheckConfig{ Period: time.Minute, }, @@ -164,7 +164,7 @@ func NewFederatedClusterController( } func (c *FederatedClusterController) HasSynced() bool { - return c.clusterInformer.Informer().HasSynced() + return c.clusterInformer.Informer().HasSynced() && c.federatedInformerManager.HasSynced() } func (c *FederatedClusterController) IsControllerReady() bool { diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 0f433d23..593105dd 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -436,8 +436,11 @@ func (m *federatedInformerManager) Start(ctx context.Context) { // Populate the initial snapshot of clusters clusters := m.clusterInformer.Informer().GetStore().List() - for _, cluster := range clusters { - m.initialClusters.Insert(cluster.(*fedcorev1a1.FederatedCluster).GetName()) + for _, clusterObj := range clusters { + cluster := clusterObj.(*fedcorev1a1.FederatedCluster) + if clusterutil.IsClusterJoined(&cluster.Status) { + m.initialClusters.Insert(cluster.GetName()) + } } for _, handler := range m.clusterEventHandlers { From 4c9895a021a1531733e5840ad5c2e86d9e7a08fb Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 14:24:42 +0800 Subject: [PATCH 102/173] fix(scheduler): typos and unit test --- pkg/controllers/scheduler/scheduler.go | 4 ++-- pkg/controllers/scheduler/util_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index b20fda85..7fbc7cbd 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -773,7 +773,7 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder fedObjects, err := s.fedObjectInformer.Lister().List(labels.Everything()) if err != nil { - s.logger.Error(err, "Failed to enquue FederatedObjects for policy") + s.logger.Error(err, "Failed to enqueue FederatedObjects for policy") return } for _, 
obj := range fedObjects { @@ -781,7 +781,7 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder } clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) if err != nil { - s.logger.Error(err, "Failed to enquue ClusterFederatedObjects for policy") + s.logger.Error(err, "Failed to enqueue ClusterFederatedObjects for policy") return } for _, obj := range clusterFedObjects { diff --git a/pkg/controllers/scheduler/util_test.go b/pkg/controllers/scheduler/util_test.go index 88f68b9b..26e862dd 100644 --- a/pkg/controllers/scheduler/util_test.go +++ b/pkg/controllers/scheduler/util_test.go @@ -94,7 +94,7 @@ func TestMatchedPolicyKey(t *testing.T) { } object.SetLabels(labels) - policy, found := GetMatchedPolicyKey(object, object.GetNamespace() != "") + policy, found := GetMatchedPolicyKey(object) if found != testCase.expectedPolicyFound { t.Fatalf("found = %v, but expectedPolicyFound = %v", found, testCase.expectedPolicyFound) } From abe213fe9028a94fe58917f752a7164a768ad804 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 14:32:24 +0800 Subject: [PATCH 103/173] fix(policyrc): nil pointer on controller initialization --- pkg/controllers/policyrc/controller.go | 29 +++++++++++++------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/pkg/controllers/policyrc/controller.go b/pkg/controllers/policyrc/controller.go index 2041af21..083a93df 100644 --- a/pkg/controllers/policyrc/controller.go +++ b/pkg/controllers/policyrc/controller.go @@ -83,20 +83,6 @@ func NewPolicyRCController( logger: logger.WithValues("controller", ControllerName), } - if _, err := c.fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChangesWithTransform( - common.NewQualifiedName, - c.countWorker.Enqueue, - )); err != nil { - return nil, err - } - - if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChangesWithTransform( - common.NewQualifiedName, - c.countWorker.Enqueue, - )); err != nil { - return nil, err - } - c.countWorker = worker.NewReconcileWorker[common.QualifiedName]( "policyrc-controller-count-worker", nil, @@ -142,6 +128,21 @@ func NewPolicyRCController( metrics, ) + if _, err := c.fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChangesWithTransform( + common.NewQualifiedName, + c.countWorker.Enqueue, + )); err != nil { + return nil, err + } + + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChangesWithTransform( + common.NewQualifiedName, + c.countWorker.Enqueue, + )); err != nil { + return nil, err + } + + if _, err := c.propagationPolicyInformer.Informer().AddEventHandler( eventhandlers.NewTriggerOnAllChangesWithTransform(common.NewQualifiedName, c.persistPpWorker.Enqueue), ); err != nil { From 5285e18e5b6dd331831eba29dddb1fde179bdb58 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 14:35:18 +0800 Subject: [PATCH 104/173] fix(scheduler): nil pointer in cluster join --- pkg/controllers/federatedcluster/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index b491c0d0..779b8a01 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -292,7 +292,7 @@ func (c *FederatedClusterController) reconcile( // Trigger initial status collection if successfully joined if joined, alreadyFailed 
:= isClusterJoined(&cluster.Status); joined && !alreadyFailed { - c.statusCollectWorker.EnqueueObject(cluster) + c.statusCollectWorker.Enqueue(common.NewQualifiedName(cluster)) } return worker.StatusAllOK From 44367f0b3af39e8ddc73756b01524cb3692de6a3 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 14:37:13 +0800 Subject: [PATCH 105/173] fix(scheduler): nil pointer in enqueue --- pkg/controllers/federatedcluster/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index 779b8a01..adddc24b 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -492,7 +492,7 @@ func (c *FederatedClusterController) enqueueAllJoinedClusters() { for _, cluster := range clusters { if clusterutil.IsClusterJoined(&cluster.Status) { - c.statusCollectWorker.EnqueueObject(cluster) + c.statusCollectWorker.Enqueue(common.NewQualifiedName(cluster)) } } } From 8c75b1c0072b5ed02c8ddf4c3fe49eee86a2e504 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 14:41:48 +0800 Subject: [PATCH 106/173] fix(worker): remove enqueueObject and keyFunc --- pkg/controllers/federate/controller.go | 1 - pkg/controllers/federatedcluster/controller.go | 2 -- pkg/controllers/nsautoprop/controller.go | 1 - pkg/controllers/override/overridepolicy_controller.go | 1 - pkg/controllers/policyrc/controller.go | 3 --- pkg/controllers/scheduler/scheduler.go | 9 ++++----- pkg/controllers/status/controller.go | 1 - pkg/util/worker/worker.go | 10 ---------- 8 files changed, 4 insertions(+), 24 deletions(-) diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index b965ea92..c81eeac7 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -111,7 +111,6 @@ func NewFederateController( c.eventRecorder = eventsink.NewDefederatingRecorderMux(kubeClient, FederateControllerName, 6) c.worker = worker.NewReconcileWorker[workerKey]( FederateControllerName, - nil, c.reconcile, worker.RateLimiterOptions{}, workerCount, diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index adddc24b..9a8264db 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -124,7 +124,6 @@ func NewFederatedClusterController( c.worker = worker.NewReconcileWorker[common.QualifiedName]( FederatedClusterControllerName, - nil, c.reconcile, worker.RateLimiterOptions{}, workerCount, @@ -133,7 +132,6 @@ func NewFederatedClusterController( c.statusCollectWorker = worker.NewReconcileWorker[common.QualifiedName]( FederatedClusterControllerName, - nil, c.collectClusterStatus, worker.RateLimiterOptions{ InitialDelay: 50 * time.Millisecond, diff --git a/pkg/controllers/nsautoprop/controller.go b/pkg/controllers/nsautoprop/controller.go index cc929855..a0066869 100644 --- a/pkg/controllers/nsautoprop/controller.go +++ b/pkg/controllers/nsautoprop/controller.go @@ -124,7 +124,6 @@ func NewNamespaceAutoPropagationController( c.worker = worker.NewReconcileWorker[common.QualifiedName]( NamespaceAutoPropagationControllerName, - nil, c.reconcile, worker.RateLimiterOptions{}, workerCount, diff --git a/pkg/controllers/override/overridepolicy_controller.go b/pkg/controllers/override/overridepolicy_controller.go index 0ab9db33..9d23c041 100644 --- a/pkg/controllers/override/overridepolicy_controller.go +++ 
b/pkg/controllers/override/overridepolicy_controller.go @@ -99,7 +99,6 @@ func NewOverridePolicyController( c.eventRecorder = eventsink.NewDefederatingRecorderMux(kubeClient, ControllerName, 4) c.worker = worker.NewReconcileWorker[common.QualifiedName]( ControllerName, - nil, c.reconcile, worker.RateLimiterOptions{}, workerCount, diff --git a/pkg/controllers/policyrc/controller.go b/pkg/controllers/policyrc/controller.go index 083a93df..3d80877e 100644 --- a/pkg/controllers/policyrc/controller.go +++ b/pkg/controllers/policyrc/controller.go @@ -85,7 +85,6 @@ func NewPolicyRCController( c.countWorker = worker.NewReconcileWorker[common.QualifiedName]( "policyrc-controller-count-worker", - nil, c.reconcileCount, worker.RateLimiterOptions{}, 1, // currently only one worker is meaningful due to the global mutex @@ -94,7 +93,6 @@ func NewPolicyRCController( c.persistPpWorker = worker.NewReconcileWorker[common.QualifiedName]( "policyrc-controller-persist-worker", - nil, func(ctx context.Context, qualifiedName common.QualifiedName) worker.Result { return c.reconcilePersist( ctx, @@ -112,7 +110,6 @@ func NewPolicyRCController( c.persistOpWorker = worker.NewReconcileWorker[common.QualifiedName]( "policyrc-controller-persist-worker", - nil, func(ctx context.Context, qualifiedName common.QualifiedName) worker.Result { return c.reconcilePersist( ctx, diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index 7fbc7cbd..330626d7 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -130,7 +130,6 @@ func NewScheduler( s.eventRecorder = eventsink.NewDefederatingRecorderMux(kubeClient, SchedulerName, 6) s.worker = worker.NewReconcileWorker[common.QualifiedName]( SchedulerName, - nil, s.reconcile, worker.RateLimiterOptions{}, workerCount, @@ -754,7 +753,7 @@ func (s *Scheduler) enqueueFederatedObjectsForPolicy(policy metav1.Object) { if policyKey, found := GetMatchedPolicyKey(obj); !found { continue } else if policyKey.Name == policyAccessor.GetName() && policyKey.Namespace == policyAccessor.GetNamespace() { - s.worker.EnqueueObject(obj) + s.worker.Enqueue(common.NewQualifiedName(obj)) } } } @@ -777,7 +776,7 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder return } for _, obj := range fedObjects { - s.worker.EnqueueObject(obj) + s.worker.Enqueue(common.NewQualifiedName(obj)) } clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) if err != nil { @@ -785,7 +784,7 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder return } for _, obj := range clusterFedObjects { - s.worker.EnqueueObject(obj) + s.worker.Enqueue(common.NewQualifiedName(obj)) } } @@ -819,7 +818,7 @@ func (s *Scheduler) enqueueFederatedObjectsForFTC(ftc *fedcorev1a1.FederatedType continue } if sourceGVK == ftc.GetSourceTypeGVK() { - s.worker.EnqueueObject(obj) + s.worker.Enqueue(common.NewQualifiedName(obj)) } } } diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index 6f3a7838..ed94f7c2 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -142,7 +142,6 @@ func NewStatusController( s.worker = worker.NewReconcileWorker( StatusControllerName, - nil, s.reconcile, worker.RateLimiterOptions{}, workerCount, diff --git a/pkg/util/worker/worker.go b/pkg/util/worker/worker.go index 2cb24766..96b6b1de 100644 --- a/pkg/util/worker/worker.go +++ b/pkg/util/worker/worker.go @@ -38,7 +38,6 @@ 
type KeyFunc[Key any] func(metav1.Object) Key type ReconcileWorker[Key any] interface { Enqueue(key Key) - EnqueueObject(obj metav1.Object) EnqueueWithBackoff(key Key) EnqueueWithDelay(key Key, delay time.Duration) Run(ctx context.Context) @@ -59,9 +58,6 @@ type asyncWorker[Key any] struct { // Name of this reconcile worker. name string - // Function to extract queue key from a metav1.Object - keyFunc KeyFunc[Key] - // Work queue holding keys to be processed. queue workqueue.RateLimitingInterface @@ -78,7 +74,6 @@ type asyncWorker[Key any] struct { func NewReconcileWorker[Key any]( name string, - keyFunc KeyFunc[Key], reconcile ReconcileFunc[Key], timing RateLimiterOptions, workerCount int, @@ -109,7 +104,6 @@ func NewReconcileWorker[Key any]( return &asyncWorker[Key]{ name: name, - keyFunc: keyFunc, reconcile: reconcile, queue: queue, workerCount: workerCount, @@ -121,10 +115,6 @@ func (w *asyncWorker[Key]) Enqueue(key Key) { w.queue.Add(key) } -func (w *asyncWorker[Key]) EnqueueObject(obj metav1.Object) { - w.Enqueue(w.keyFunc(obj)) -} - func (w *asyncWorker[Key]) EnqueueWithBackoff(key Key) { w.queue.AddRateLimited(key) } From fb44559d797550b45ee967d68fe1941f6838c802 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 15:00:19 +0800 Subject: [PATCH 107/173] chore(cluster-controller): adjust healthcheck period --- pkg/controllers/federatedcluster/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controllers/federatedcluster/controller.go b/pkg/controllers/federatedcluster/controller.go index 9a8264db..3436c481 100644 --- a/pkg/controllers/federatedcluster/controller.go +++ b/pkg/controllers/federatedcluster/controller.go @@ -105,7 +105,7 @@ func NewFederatedClusterController( fedClient: fedClient, fedSystemNamespace: fedSystemNamespace, clusterHealthCheckConfig: &ClusterHealthCheckConfig{ - Period: time.Minute, + Period: time.Second*30, }, clusterJoinTimeout: clusterJoinTimeout, metrics: metrics, From af546b360805b18a099648ce93247ad488f6a96e Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 17:47:52 +0800 Subject: [PATCH 108/173] feat(scheduler): use label selectors for listing --- pkg/controllers/nsautoprop/controller.go | 2 +- pkg/controllers/scheduler/scheduler.go | 63 ++++++++++++++++-------- 2 files changed, 44 insertions(+), 21 deletions(-) diff --git a/pkg/controllers/nsautoprop/controller.go b/pkg/controllers/nsautoprop/controller.go index a0066869..8744e23a 100644 --- a/pkg/controllers/nsautoprop/controller.go +++ b/pkg/controllers/nsautoprop/controller.go @@ -135,7 +135,7 @@ func NewNamespaceAutoPropagationController( func(obj *fedcorev1a1.ClusterFederatedObject) { srcMeta, err := obj.Spec.GetTemplateAsUnstructured() if err != nil { - logger.Error( + c.logger.Error( err, "Failed to get source object's metadata from ClusterFederatedObject", "object", diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index 330626d7..1326fab2 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -30,6 +30,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -293,7 +294,7 @@ func (s *Scheduler) reconcile(ctx context.Context, key common.QualifiedName) (st if schedulingProfile != nil { ctx, logger = logging.InjectLoggerValues( ctx, 
- "schedulingProfile", + "scheduling-profile", common.NewQualifiedName(schedulingProfile).String(), ) } @@ -314,13 +315,13 @@ func (s *Scheduler) reconcile(ctx context.Context, key common.QualifiedName) (st spec := policy.GetSpec() auxInfo.enableFollowerScheduling = !spec.DisableFollowerScheduling - ctx, logger = logging.InjectLoggerValues(ctx, "enableFollowerScheduling", auxInfo.enableFollowerScheduling) + ctx, logger = logging.InjectLoggerValues(ctx, "enable-follower-scheduling", auxInfo.enableFollowerScheduling) if autoMigration := spec.AutoMigration; autoMigration != nil { auxInfo.unschedulableThreshold = pointer.Duration(autoMigration.Trigger.PodUnschedulableDuration.Duration) ctx, logger = logging.InjectLoggerValues( ctx, - "unschedulableThreshold", + "unschedulable-threshold", auxInfo.unschedulableThreshold.String(), ) } @@ -716,13 +717,22 @@ func (s *Scheduler) enqueueFederatedObjectsForPolicy(policy metav1.Object) { logger := s.logger.WithValues("policy", policyKey.String()) logger.V(2).Info("Enqueue FederatedObjects and ClusterFederatedObjects for policy") + isPolicyNamespaced := len(policyKey.Namespace) > 0 + allObjects := []metav1.Object{} - if len(policyKey.Namespace) > 0 { + var labelSelector labels.Selector + if isPolicyNamespaced { + labelSelector = labels.Set{PropagationPolicyNameLabel: policyKey.Name}.AsSelector() + } else { + labelSelector = labels.Set{ClusterPropagationPolicyNameLabel: policyKey.Name}.AsSelector() + } + + if isPolicyNamespaced { // If the policy is namespaced, we only need to scan FederatedObjects in the same namespace. - fedObjects, err := s.fedObjectInformer.Lister().FederatedObjects(policyKey.Namespace).List(labels.Everything()) + fedObjects, err := s.fedObjectInformer.Lister().FederatedObjects(policyKey.Namespace).List(labelSelector) if err != nil { - s.logger.Error(err, "Failed to enqueue FederatedObjects for policy") + logger.Error(err, "Failed to enqueue FederatedObjects for policy") return } for _, obj := range fedObjects { @@ -730,18 +740,18 @@ func (s *Scheduler) enqueueFederatedObjectsForPolicy(policy metav1.Object) { } } else { // If the policy is cluster-scoped, we need to scan all FederatedObjects and ClusterFederatedObjects - fedObjects, err := s.fedObjectInformer.Lister().List(labels.Everything()) + fedObjects, err := s.fedObjectInformer.Lister().List(labelSelector) if err != nil { - s.logger.Error(err, "Failed to enqueue FederatedObjects for policy") + logger.Error(err, "Failed to enqueue FederatedObjects for policy") return } for _, obj := range fedObjects { allObjects = append(allObjects, obj) } - clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) + clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labelSelector) if err != nil { - s.logger.Error(err, "Failed to enqueue ClusterFederatedObjects for policy") + logger.Error(err, "Failed to enqueue ClusterFederatedObjects for policy") return } for _, obj := range clusterFedObjects { @@ -762,25 +772,38 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder logger := s.logger.WithValues("cluster", cluster.GetName()) if !clusterutil.IsClusterJoined(&cluster.Status) { - s.logger.WithValues("cluster", cluster.Name). - V(3). 
- Info("Skip enqueue federated objects for cluster, cluster not joined") + logger.V(3).Info("Skip enqueue federated objects for cluster, cluster not joined") return } logger.V(2).Info("Enqueue federated objects for cluster") - fedObjects, err := s.fedObjectInformer.Lister().List(labels.Everything()) + hasPropagationPolicy, err := labels.NewRequirement(PropagationPolicyNameLabel, selection.Exists, []string{}) if err != nil { - s.logger.Error(err, "Failed to enqueue FederatedObjects for policy") + logger.Error(err, "Failed to generate label selector for federated objects") + return + } + hasClusterPropagationPolicy, err := labels.NewRequirement( + ClusterPropagationPolicyNameLabel, + selection.Exists, + []string{}, + ) + if err != nil { + logger.Error(err, "Failed to generate label selector for federated objects") + } + labelSelector := labels.NewSelector().Add(*hasPropagationPolicy).Add(*hasClusterPropagationPolicy) + + fedObjects, err := s.fedObjectInformer.Lister().List(labelSelector) + if err != nil { + logger.Error(err, "Failed to enqueue FederatedObjects for policy") return } for _, obj := range fedObjects { s.worker.Enqueue(common.NewQualifiedName(obj)) } - clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) + clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labelSelector) if err != nil { - s.logger.Error(err, "Failed to enqueue ClusterFederatedObjects for policy") + logger.Error(err, "Failed to enqueue ClusterFederatedObjects for policy") return } for _, obj := range clusterFedObjects { @@ -796,7 +819,7 @@ func (s *Scheduler) enqueueFederatedObjectsForFTC(ftc *fedcorev1a1.FederatedType allObjects := []fedcorev1a1.GenericFederatedObject{} fedObjects, err := s.fedObjectInformer.Lister().List(labels.Everything()) if err != nil { - s.logger.Error(err, "Failed to enquue FederatedObjects for policy") + logger.Error(err, "Failed to enqueue FederatedObjects for policy") return } for _, obj := range fedObjects { @@ -804,7 +827,7 @@ func (s *Scheduler) enqueueFederatedObjectsForFTC(ftc *fedcorev1a1.FederatedType } clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labels.Everything()) if err != nil { - s.logger.Error(err, "Failed to enquue ClusterFederatedObjects for policy") + logger.Error(err, "Failed to enqueue ClusterFederatedObjects for policy") return } for _, obj := range clusterFedObjects { @@ -814,7 +837,7 @@ func (s *Scheduler) enqueueFederatedObjectsForFTC(ftc *fedcorev1a1.FederatedType for _, obj := range allObjects { sourceGVK, err := obj.GetSpec().GetTemplateGVK() if err != nil { - s.logger.Error(err, "Failed to get source GVK from FederatedObject, will not enqueue") + logger.Error(err, "Failed to get source GVK from FederatedObject, will not enqueue") continue } if sourceGVK == ftc.GetSourceTypeGVK() { From 1b642dbccea7707625d5648a77a9f16e2d6e6757 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 18:03:54 +0800 Subject: [PATCH 109/173] fix(scheduler): use label selectors for listing --- .../override/overridepolicy_controller.go | 2 +- pkg/controllers/scheduler/scheduler.go | 57 +++++++++---------- 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/pkg/controllers/override/overridepolicy_controller.go b/pkg/controllers/override/overridepolicy_controller.go index 9d23c041..4c744196 100644 --- a/pkg/controllers/override/overridepolicy_controller.go +++ b/pkg/controllers/override/overridepolicy_controller.go @@ -234,7 +234,7 @@ func (c *Controller) enqueueFedObjectsUsingPolicy(policy 
fedcorev1a1.GenericOver func (c *Controller) reconcileOnClusterChange(cluster *fedcorev1a1.FederatedCluster) { logger := c.logger.WithValues("federated-cluster", cluster.GetName()) - logger.V(2).Info("observed a cluster change") + logger.V(2).Info("Observed a cluster change") opRequirement, _ := labels.NewRequirement(OverridePolicyNameLabel, selection.Exists, nil) copRequirement, _ := labels.NewRequirement(ClusterOverridePolicyNameLabel, selection.Exists, nil) diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index 1326fab2..a9093636 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -715,7 +715,7 @@ func (s *Scheduler) enqueueFederatedObjectsForPolicy(policy metav1.Object) { policyKey := common.NewQualifiedName(policyAccessor) logger := s.logger.WithValues("policy", policyKey.String()) - logger.V(2).Info("Enqueue FederatedObjects and ClusterFederatedObjects for policy") + logger.V(2).Info("Enqueue federated objects for policy") isPolicyNamespaced := len(policyKey.Namespace) > 0 @@ -778,36 +778,33 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder logger.V(2).Info("Enqueue federated objects for cluster") - hasPropagationPolicy, err := labels.NewRequirement(PropagationPolicyNameLabel, selection.Exists, []string{}) - if err != nil { - logger.Error(err, "Failed to generate label selector for federated objects") - return - } - hasClusterPropagationPolicy, err := labels.NewRequirement( - ClusterPropagationPolicyNameLabel, - selection.Exists, - []string{}, - ) - if err != nil { - logger.Error(err, "Failed to generate label selector for federated objects") - } - labelSelector := labels.NewSelector().Add(*hasPropagationPolicy).Add(*hasClusterPropagationPolicy) + for _, policyLabel := range []string{PropagationPolicyNameLabel, ClusterPropagationPolicyNameLabel} { + hasPolicy, err := labels.NewRequirement(policyLabel, selection.Exists, []string{}) + if err != nil { + logger.Error(err, "Failed to generate label selector for federated objects") + return + } + if err != nil { + logger.Error(err, "Failed to generate label selector for federated objects") + } + labelSelector := labels.NewSelector().Add(*hasPolicy) - fedObjects, err := s.fedObjectInformer.Lister().List(labelSelector) - if err != nil { - logger.Error(err, "Failed to enqueue FederatedObjects for policy") - return - } - for _, obj := range fedObjects { - s.worker.Enqueue(common.NewQualifiedName(obj)) - } - clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labelSelector) - if err != nil { - logger.Error(err, "Failed to enqueue ClusterFederatedObjects for policy") - return - } - for _, obj := range clusterFedObjects { - s.worker.Enqueue(common.NewQualifiedName(obj)) + fedObjects, err := s.fedObjectInformer.Lister().List(labelSelector) + if err != nil { + logger.Error(err, "Failed to enqueue FederatedObjects for cluster") + return + } + for _, obj := range fedObjects { + s.worker.Enqueue(common.NewQualifiedName(obj)) + } + clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labelSelector) + if err != nil { + logger.Error(err, "Failed to enqueue ClusterFederatedObjects for cluster") + return + } + for _, obj := range clusterFedObjects { + s.worker.Enqueue(common.NewQualifiedName(obj)) + } } } From 76e5baa0607f379798cf24fe41e3f9539dd7e791 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 20:11:35 +0800 Subject: [PATCH 110/173] fix(scheduler): fix extra error check and double 
reenqueue --- pkg/controllers/scheduler/scheduler.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index a9093636..46c5f96d 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -778,15 +778,13 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder logger.V(2).Info("Enqueue federated objects for cluster") + allObjects := sets.New[common.QualifiedName]() for _, policyLabel := range []string{PropagationPolicyNameLabel, ClusterPropagationPolicyNameLabel} { hasPolicy, err := labels.NewRequirement(policyLabel, selection.Exists, []string{}) if err != nil { logger.Error(err, "Failed to generate label selector for federated objects") return } - if err != nil { - logger.Error(err, "Failed to generate label selector for federated objects") - } labelSelector := labels.NewSelector().Add(*hasPolicy) fedObjects, err := s.fedObjectInformer.Lister().List(labelSelector) @@ -795,7 +793,7 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder return } for _, obj := range fedObjects { - s.worker.Enqueue(common.NewQualifiedName(obj)) + allObjects.Insert(common.NewQualifiedName(obj)) } clusterFedObjects, err := s.clusterFedObjectInformer.Lister().List(labelSelector) if err != nil { @@ -803,9 +801,13 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder return } for _, obj := range clusterFedObjects { - s.worker.Enqueue(common.NewQualifiedName(obj)) + allObjects.Insert(common.NewQualifiedName(obj)) } } + + for obj := range allObjects { + s.worker.Enqueue(obj) + } } From ab72f36093ac96854facc09a7ab191ad4e3cb7b9 Mon Sep 17 00:00:00 2001 From: "hawjia.lim" Date: Fri, 28 Jul 2023 20:36:05 +0800 Subject: [PATCH 111/173] chore(scheduler): fix formatting --- pkg/controllers/scheduler/scheduler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index 46c5f96d..4e180b53 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -805,7 +805,7 @@ func (s *Scheduler) enqueueFederatedObjectsForCluster(cluster *fedcorev1a1.Feder } } - for obj := range allObjects { + for obj := range allObjects { s.worker.Enqueue(obj) } } From b30649c4758f5338d248bdd7e63a610a3cb07ddd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 17 Jul 2023 02:49:12 +0000 Subject: [PATCH 112/173] refactor: use annotation for replicas retention --- pkg/controllers/common/constants.go | 3 +++ pkg/controllers/federate/controller.go | 3 +-- pkg/controllers/federate/util.go | 2 +- pkg/controllers/sync/dispatch/retain.go | 16 +++++----------- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index 810817e5..b3896ee6 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -121,6 +121,9 @@ const ( // FederatedObjectAnnotation indicates that the object was created by the federate controller. FederatedObjectAnnotation = DefaultPrefix + "federated-object" + // RetainReplicasAnnotation indicates that the replicas field of the cluster objects should be retained during propagation.
+ RetainReplicasAnnotation = DefaultPrefix + "retain-replicas" + // FollowersAnnotation indicates the additional followers of a leader. FollowersAnnotation = DefaultPrefix + "followers" // EnableFollowerSchedulingAnnotation indicates whether follower scheduling should be enabled for the leader object. diff --git a/pkg/controllers/federate/controller.go b/pkg/controllers/federate/controller.go index c81eeac7..4db4b65c 100644 --- a/pkg/controllers/federate/controller.go +++ b/pkg/controllers/federate/controller.go @@ -62,8 +62,7 @@ const ( FinalizerFederateController = common.DefaultPrefix + "federate-controller" // If this annotation is present on the source object, skip federating it. - NoFederatedResource = common.DefaultPrefix + "no-federated-resource" - RetainReplicasAnnotation = common.DefaultPrefix + "retain-replicas" + NoFederatedResource = common.DefaultPrefix + "no-federated-resource" ) // FederateController federates objects of source type to FederatedObjects or ClusterFederatedObjects. diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index 47bb24db..c6151f61 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -247,6 +247,7 @@ func updateFederatedObjectForSourceObject( var ( // List of annotations that should be copied to the federated object instead of the template from the source federatedAnnotationSet = sets.New( + common.RetainReplicasAnnotation, scheduler.SchedulingModeAnnotation, scheduler.StickyClusterAnnotation, nsautoprop.NoAutoPropagationAnnotation, @@ -260,7 +261,6 @@ var ( common.NoSchedulingAnnotation, scheduler.FollowsObjectAnnotation, common.FollowersAnnotation, - RetainReplicasAnnotation, ) // TODO: Do we need to specify the internal annotations here? diff --git a/pkg/controllers/sync/dispatch/retain.go b/pkg/controllers/sync/dispatch/retain.go index 6c3004f7..4a39456e 100644 --- a/pkg/controllers/sync/dispatch/retain.go +++ b/pkg/controllers/sync/dispatch/retain.go @@ -26,6 +26,7 @@ import ( "strings" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -525,23 +526,16 @@ func findServiceAccountVolumeMount(container map[string]interface{}) (volumeMoun return nil, 0, false } -func checkRetainReplicas(fedObj *unstructured.Unstructured) (bool, error) { - retainReplicas, ok, err := unstructured.NestedBool(fedObj.Object, common.SpecField, common.RetainReplicasField) - if err != nil { - return false, err - } - return ok && retainReplicas, nil +func checkRetainReplicas(fedObj metav1.Object) bool { + return fedObj.GetAnnotations()[common.RetainReplicasAnnotation] == common.AnnotationValueTrue } -func retainReplicas(desiredObj, clusterObj, fedObj *unstructured.Unstructured, typeConfig *fedcorev1a1.FederatedTypeConfig) error { +func retainReplicas(desiredObj, clusterObj *unstructured.Unstructured, fedObj metav1.Object, typeConfig *fedcorev1a1.FederatedTypeConfig) error { // Retain the replicas field if the federated object has been // configured to do so. If the replicas field is intended to be // set by the in-cluster HPA controller, not retaining it will // thrash the scheduler. 
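// In concrete terms, the gate is the annotation added in this patch; a
// minimal sketch of the retention step follows (the real code resolves the
// replicas location through typeConfig.Spec.PathDefinition.ReplicasSpec
// rather than the hard-coded path used here):
//
//	if fedObj.GetAnnotations()[common.RetainReplicasAnnotation] == common.AnnotationValueTrue {
//	    replicas, found, err := unstructured.NestedInt64(clusterObj.Object, "spec", "replicas")
//	    if err == nil && found {
//	        _ = unstructured.SetNestedField(desiredObj.Object, replicas, "spec", "replicas")
//	    }
//	}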
- retain, err := checkRetainReplicas(fedObj) - if err != nil { - return err - } + retain := checkRetainReplicas(fedObj) if retain { replicas, err := utilunstructured.GetInt64FromPath(clusterObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) if err != nil { From d06901d3b2c2f08c18a65024b414d403b6855b5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Tue, 18 Jul 2023 10:39:41 +0000 Subject: [PATCH 113/173] refactor: sync controller * use unified federated type * remove rollout * remove revision history * remove source feedback --- ...ubeadmiral.io_clusterfederatedobjects.yaml | 4 - .../core.kubeadmiral.io_federatedobjects.yaml | 4 - .../core/v1alpha1/types_federatedobject.go | 6 - .../core/v1alpha1/zz_generated.deepcopy.go | 5 - pkg/controllers/common/constants.go | 56 +- .../framework/plugins/clusterresources/fit.go | 2 - pkg/controllers/sync/accessor.go | 237 ++- pkg/controllers/sync/controller.go | 915 ++++------- .../sync/dispatch/checkunmanaged.go | 27 +- pkg/controllers/sync/dispatch/managed.go | 513 +----- pkg/controllers/sync/dispatch/operation.go | 12 +- pkg/controllers/sync/dispatch/retain.go | 69 +- pkg/controllers/sync/dispatch/unmanaged.go | 68 +- pkg/controllers/sync/history.go | 305 ---- pkg/controllers/sync/placement.go | 82 +- pkg/controllers/sync/resource.go | 261 +-- pkg/controllers/sync/status/status.go | 120 +- pkg/controllers/sync/status/status_test.go | 25 +- pkg/controllers/sync/version/manager.go | 72 +- pkg/controllers/util/rolloutplan.go | 868 ---------- pkg/controllers/util/rolloutplan_test.go | 1452 ----------------- .../adoption/adopted.go} | 19 +- .../adoption/conflictresolution.go} | 6 +- pkg/util/fedobjectadapters/adapters.go | 17 + .../federatedinformermanager.go | 16 + pkg/util/informermanager/interface.go | 16 +- .../orphaning}/orphaningannotation.go | 6 +- 27 files changed, 788 insertions(+), 4395 deletions(-) delete mode 100644 pkg/controllers/sync/history.go delete mode 100644 pkg/controllers/util/rolloutplan.go delete mode 100644 pkg/controllers/util/rolloutplan_test.go rename pkg/{controllers/util/adoptedannotation.go => util/adoption/adopted.go} (70%) rename pkg/{controllers/util/conflictresolutionannotation.go => util/adoption/conflictresolution.go} (89%) rename pkg/{controllers/util => util/orphaning}/orphaningannotation.go (92%) diff --git a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml index 4f361f67..f1fe763c 100644 --- a/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml +++ b/config/crds/core.kubeadmiral.io_clusterfederatedobjects.yaml @@ -146,10 +146,6 @@ spec: - status type: object type: array - collisionCount: - description: CollisionCount can be used in conjunction with RevisionHistory to implement rollbacks. - format: int32 - type: integer conditions: description: Conditions describe the current state of this FederatedObject. items: diff --git a/config/crds/core.kubeadmiral.io_federatedobjects.yaml b/config/crds/core.kubeadmiral.io_federatedobjects.yaml index 33f803e4..c280a5d8 100644 --- a/config/crds/core.kubeadmiral.io_federatedobjects.yaml +++ b/config/crds/core.kubeadmiral.io_federatedobjects.yaml @@ -146,10 +146,6 @@ spec: - status type: object type: array - collisionCount: - description: CollisionCount can be used in conjunction with RevisionHistory to implement rollbacks. - format: int32 - type: integer conditions: description: Conditions describe the current state of this FederatedObject. 
items: diff --git a/pkg/apis/core/v1alpha1/types_federatedobject.go b/pkg/apis/core/v1alpha1/types_federatedobject.go index 353cadf7..e4948571 100644 --- a/pkg/apis/core/v1alpha1/types_federatedobject.go +++ b/pkg/apis/core/v1alpha1/types_federatedobject.go @@ -116,10 +116,6 @@ type GenericFederatedObjectStatus struct { Conditions []GenericFederatedObjectCondition `json:"conditions,omitempty"` // Clusters contains the propagation status of the Kubernetes object for individual member clusters. Clusters []PropagationStatus `json:"clusters,omitempty"` - - // CollisionCount can be used in conjunction with RevisionHistory to implement rollbacks. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty"` } // PlacementWithController describes the member clusters that a Kubernetes object should be propagated to. @@ -213,10 +209,8 @@ const ( AggregateSuccess FederatedObjectConditionReason = "" SyncRevisionsFailed FederatedObjectConditionReason = "SyncRevisionsFailed" ClusterRetrievalFailed FederatedObjectConditionReason = "ClusterRetrievalFailed" - ComputePlacementFailed FederatedObjectConditionReason = "ComputePlacementFailed" PlanRolloutFailed FederatedObjectConditionReason = "PlanRolloutFailed" CheckClusters FederatedObjectConditionReason = "CheckClusters" - NamespaceNotFederated FederatedObjectConditionReason = "NamespaceNotFederated" EnsureDeletionFailed FederatedObjectConditionReason = "EnsureDeletionFailed" ) diff --git a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go index 1226a6c0..5457a18e 100644 --- a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -984,11 +984,6 @@ func (in *GenericFederatedObjectStatus) DeepCopyInto(out *GenericFederatedObject *out = make([]PropagationStatus, len(*in)) copy(*out, *in) } - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } return } diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index b3896ee6..a795581d 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -20,7 +20,10 @@ are Copyright 2023 The KubeAdmiral Authors. package common -import "k8s.io/apimachinery/pkg/runtime/schema" +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) const ( DefaultFedSystemNamespace = "kube-admiral-system" @@ -30,7 +33,11 @@ const ( ) const ( - NamespaceResource = "namespaces" + NamespaceResource = "namespaces" + DeploymentResource = "deployments" + DaemonSetResource = "daemonsets" + ConfigMapResource = "configmaps" + SecretResource = "secrets" NamespaceKind = "Namespace" DeploymentKind = "Deployment" @@ -81,13 +88,6 @@ const ( OverridesField = "overrides" FollowsField = "follows" - // Rolling Update - - StrategyField = "strategy" - RollingUpdateField = "rollingUpdate" - MaxSurgeField = "maxSurge" - MaxUnavailableField = "maxUnavailable" - // Status AvailableReplicasField = "availableReplicas" @@ -106,14 +106,6 @@ const ( AnnotationValueTrue = "true" AnnotationValueFalse = "false" - // The following annotations contain metadata. 
- - LastRevisionAnnotation = DefaultPrefix + "last-revision" - CurrentRevisionAnnotation = DefaultPrefix + "current-revision" - LastReplicasetName = DefaultPrefix + "last-replicaset-name" - SourceGenerationAnnotation = DefaultPrefix + "source-generation" - FederatedGenerationAnnotation = DefaultPrefix + "federated-generation" - // The following annotations control the behavior of Kubeadmiral controllers. NoSchedulingAnnotation = DefaultPrefix + "no-scheduling" @@ -163,29 +155,13 @@ const ( ClusterServiceAccountCAKey = "service-account-ca-data" ) -var DeploymentGVR = schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "deployments", -} - -var ConfigMapGVR = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "configmaps", -} - -var SecretGVR = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "secrets", -} - -var DaemonSetGVR = schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "daemonsets", -} +var ( + DeploymentGVR = appsv1.SchemeGroupVersion.WithResource(DeploymentResource) + DaemonSetGVR = appsv1.SchemeGroupVersion.WithResource(DaemonSetResource) + NamespaceGVR = corev1.SchemeGroupVersion.WithResource(NamespaceResource) + ConfigMapGVR = corev1.SchemeGroupVersion.WithResource(ConfigMapResource) + SecretGVR = corev1.SchemeGroupVersion.WithResource(SecretResource) +) // MaxFederatedObjectNameLength defines the max length of a federated object name. // A custom resource name must be a DNS subdomain as defined in RFC1123 with a maximum length of 253. diff --git a/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go b/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go index 0310205b..10468da0 100644 --- a/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go +++ b/pkg/controllers/scheduler/framework/plugins/clusterresources/fit.go @@ -54,8 +54,6 @@ func (pl *ClusterResourcesFit) Filter( return framework.NewResult(framework.Error, err.Error()) } - // TODO(all), fixed me, if the scheduling unit is a type of RSP scheduling, skip here. - insufficientResources := fitsRequest(su, cluster) if len(insufficientResources) != 0 { diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index 236f2935..4507e48f 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -22,18 +22,24 @@ are Copyright 2023 The KubeAdmiral Authors. 
package sync import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" pkgruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" genericclient "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/version" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" ) // FederatedResourceAccessor provides a way to retrieve and visit @@ -43,29 +49,23 @@ type FederatedResourceAccessor interface { HasSynced() bool FederatedResource( qualifiedName common.QualifiedName, - ) (federatedResource FederatedResource, possibleOrphan bool, err error) - VisitFederatedResources(visitFunc func(obj interface{})) + ) (federatedResource FederatedResource, err error) + VisitFederatedResources(visitFunc func(fedcorev1a1.GenericFederatedObject)) } type resourceAccessor struct { - limitedScope bool - typeConfig *fedcorev1a1.FederatedTypeConfig fedNamespace string - // The informer for the federated type. - federatedStore cache.Store - federatedController cache.Controller - - fedNamespaceAPIResource *metav1.APIResource - - // The informer used to source federated namespaces used in - // determining placement for namespaced resources. Will only be - // initialized if the target resource is namespaced. 
- fedNamespaceStore cache.Store - fedNamespaceController cache.Controller + // Informers for federated objects + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer // Manages propagated versions - versionManager *version.VersionManager + versionManager *version.VersionManager + clusterVersionManager *version.VersionManager + + // Manages FTCs + ftcManager informermanager.FederatedTypeConfigManager // Records events on the federated resource eventRecorder record.EventRecorder @@ -76,85 +76,47 @@ type resourceAccessor struct { func NewFederatedResourceAccessor( logger klog.Logger, controllerConfig *util.ControllerConfig, - typeConfig *fedcorev1a1.FederatedTypeConfig, - fedNamespaceAPIResource *metav1.APIResource, client genericclient.Client, - enqueueObj func(pkgruntime.Object), + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, + ftcManager informermanager.FederatedTypeConfigManager, + enqueue func(common.QualifiedName), eventRecorder record.EventRecorder, -) (FederatedResourceAccessor, error) { +) FederatedResourceAccessor { a := &resourceAccessor{ - limitedScope: controllerConfig.LimitedScope(), - typeConfig: typeConfig, - fedNamespace: controllerConfig.FedSystemNamespace, - fedNamespaceAPIResource: fedNamespaceAPIResource, - eventRecorder: eventRecorder, - logger: logger.WithValues("origin", "resource-accessor"), + fedNamespace: controllerConfig.FedSystemNamespace, + fedObjectInformer: fedObjectInformer, + clusterFedObjectInformer: clusterFedObjectInformer, + ftcManager: ftcManager, + eventRecorder: eventRecorder, + logger: logger.WithValues("origin", "resource-accessor"), } - targetNamespace := controllerConfig.TargetNamespace - - federatedTypeAPIResource := typeConfig.GetFederatedType() - federatedTypeClient, err := util.NewResourceClient(controllerConfig.KubeConfig, &federatedTypeAPIResource) - if err != nil { - return nil, err - } - a.federatedStore, a.federatedController = util.NewResourceInformer( - federatedTypeClient, - targetNamespace, - enqueueObj, - controllerConfig.Metrics, - ) - - if typeConfig.GetNamespaced() { - fedNamespaceEnqueue := func(fedNamespaceObj pkgruntime.Object) { - // When a federated namespace changes, every resource in - // the namespace needs to be reconciled. - // - // TODO Consider optimizing this to only reconcile - // contained resources in response to a change in - // placement for the federated namespace. - namespace := common.NewQualifiedName(fedNamespaceObj).Name - for _, rawObj := range a.federatedStore.List() { - obj := rawObj.(pkgruntime.Object) - qualifiedName := common.NewQualifiedName(obj) - if qualifiedName.Namespace == namespace { - enqueueObj(obj) - } - } - } - // Initialize an informer for federated namespaces. Placement - // for a resource is computed as the intersection of resource - // and federated namespace placement. 
- fedNamespaceClient, err := util.NewResourceClient(controllerConfig.KubeConfig, fedNamespaceAPIResource) - if err != nil { - return nil, err - } - a.fedNamespaceStore, a.fedNamespaceController = util.NewResourceInformer( - fedNamespaceClient, - targetNamespace, - fedNamespaceEnqueue, - controllerConfig.Metrics, - ) - } + handler := util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + enqueue(common.NewQualifiedName(o)) + }) + fedObjectInformer.Informer().AddEventHandler(handler) + clusterFedObjectInformer.Informer().AddEventHandler(handler) a.versionManager = version.NewVersionManager( logger, client, - typeConfig.GetNamespaced(), - typeConfig.GetFederatedType().Kind, - typeConfig.GetTargetType().Kind, - targetNamespace, + true, + controllerConfig.TargetNamespace, + ) + a.clusterVersionManager = version.NewVersionManager( + logger, + client, + false, + controllerConfig.TargetNamespace, ) - return a, nil + return a } func (a *resourceAccessor) Run(stopChan <-chan struct{}) { go a.versionManager.Sync(stopChan) - go a.federatedController.Run(stopChan) - if a.fedNamespaceController != nil { - go a.fedNamespaceController.Run(stopChan) - } + go a.clusterVersionManager.Sync(stopChan) } func (a *resourceAccessor) HasSynced() bool { @@ -162,75 +124,82 @@ func (a *resourceAccessor) HasSynced() bool { a.logger.V(3).Info("Version manager not synced") return false } - if !a.federatedController.HasSynced() { - a.logger.V(3).Info("Informer not synced") - return false - } - if a.fedNamespaceController != nil && !a.fedNamespaceController.HasSynced() { - a.logger.V(3).Info("FederatedNamespace informer not synced") + if !a.clusterVersionManager.HasSynced() { + a.logger.V(3).Info("Cluster version manager not synced") return false } return true } -func (a *resourceAccessor) FederatedResource(eventSource common.QualifiedName) (FederatedResource, bool, error) { - if a.typeConfig.GetTargetType().Kind == common.NamespaceKind && a.isSystemNamespace(eventSource.Name) { - return nil, false, nil +func (a *resourceAccessor) FederatedResource( + qualifiedName common.QualifiedName, +) (FederatedResource, error) { + federatedObject, err := fedobjectadapters.GetFromLister( + a.fedObjectInformer.Lister(), + a.clusterFedObjectInformer.Lister(), + qualifiedName.Namespace, + qualifiedName.Name, + ) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, nil + } else { + return nil, err + } } + federatedObject = federatedObject.DeepCopyGenericFederatedObject() - kind := a.typeConfig.GetFederatedType().Kind - - // Most federated resources have the same name as their targets. 
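The FederatedResource implementation here resolves the target type from the federated object itself: the source manifest is stored verbatim in spec.template as a runtime.RawExtension, so decoding it into an Unstructured recovers the GVK used for the FTC lookup. A self-contained sketch of that decode step (the helper name and sample JSON are illustrative, not part of the patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// targetGVKFromTemplate decodes a federated object's spec.template and
// returns the GroupVersionKind of the templated source resource.
func targetGVKFromTemplate(template runtime.RawExtension) (schema.GroupVersionKind, error) {
	u := &unstructured.Unstructured{}
	if err := u.UnmarshalJSON(template.Raw); err != nil {
		return schema.GroupVersionKind{}, fmt.Errorf("failed to unmarshal template: %w", err)
	}
	return u.GroupVersionKind(), nil
}

func main() {
	tmpl := runtime.RawExtension{Raw: []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demo"}}`)}
	gvk, err := targetGVKFromTemplate(tmpl)
	fmt.Println(gvk, err) // apps/v1, Kind=Deployment <nil>
}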
- targetName := common.QualifiedName{ - Namespace: eventSource.Namespace, - Name: eventSource.Name, + template := &unstructured.Unstructured{} + if err := template.UnmarshalJSON(federatedObject.GetSpec().Template.Raw); err != nil { + return nil, fmt.Errorf("failed to unmarshal template: %w", err) } - federatedName := common.QualifiedName{ - Namespace: util.NamespaceForResource(eventSource.Namespace, a.fedNamespace), - Name: eventSource.Name, - } - - key := federatedName.String() - resource, err := util.ObjFromCache(a.federatedStore, kind, key) - if err != nil { - return nil, false, err + targetGVK := template.GroupVersionKind() + if targetGVK == corev1.SchemeGroupVersion.WithKind(common.NamespaceKind) && a.isSystemNamespace(qualifiedName.Name) { + return nil, nil } - if resource == nil { - return nil, true, nil + + typeConfig, exists := a.ftcManager.GetResourceFTC(targetGVK) + if !exists || typeConfig == nil { + return nil, nil } + typeConfig = typeConfig.DeepCopy() - var fedNamespace *unstructured.Unstructured - if a.typeConfig.GetNamespaced() { - fedNamespaceName := common.QualifiedName{Name: federatedName.Namespace} - fedNamespace, err = util.ObjFromCache( - a.fedNamespaceStore, - a.fedNamespaceAPIResource.Kind, - fedNamespaceName.String(), - ) - if err != nil { - return nil, false, err - } - // If fedNamespace is nil, the resources in member clusters - // will be removed. + var versionManager *version.VersionManager + if typeConfig.GetNamespaced() { + versionManager = a.versionManager + } else { + versionManager = a.clusterVersionManager } return &federatedResource{ - limitedScope: a.limitedScope, - typeConfig: a.typeConfig, - targetName: targetName, - federatedKind: kind, - federatedName: federatedName, - federatedResource: resource, - versionManager: a.versionManager, - fedNamespace: fedNamespace, - eventRecorder: a.eventRecorder, - }, false, nil + typeConfig: typeConfig, + federatedName: qualifiedName, + targetName: common.NewQualifiedName(template), + federatedObject: federatedObject, + template: template, + versionManager: versionManager, + eventRecorder: a.eventRecorder, + }, nil } -func (a *resourceAccessor) VisitFederatedResources(visitFunc func(obj interface{})) { - for _, obj := range a.federatedStore.List() { - visitFunc(obj) +func (a *resourceAccessor) VisitFederatedResources(visitFunc func(obj fedcorev1a1.GenericFederatedObject)) { + fedObjects, err := a.fedObjectInformer.Lister().List(labels.Everything()) + if err == nil { + for _, obj := range fedObjects { + visitFunc(obj) + } + } else { + a.logger.Error(err, "Failed to list FederatedObjects from lister") + } + + clusterFedObjects, err := a.clusterFedObjectInformer.Lister().List(labels.Everything()) + if err == nil { + for _, obj := range clusterFedObjects { + visitFunc(obj) + } + } else { + a.logger.Error(err, "Failed to list ClusterFederatedObjects from lister") } } diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index fbb34e19..a4881f16 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -24,7 +24,6 @@ package sync import ( "context" "fmt" - "hash/fnv" "strconv" "strings" "time" @@ -35,42 +34,49 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - kubeclient 
"k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + "github.com/kubewharf/kubeadmiral/pkg/client/generic" + "github.com/kubewharf/kubeadmiral/pkg/util/adoption" + clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/orphaning" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - genericclient "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/dispatch" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/status" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" finalizersutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/history" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/managedlabel" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/sourcefeedback" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" + "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" +) + +const ( + SyncControllerName = "sync-controller" ) const ( EventReasonWaitForCascadingDelete = "WaitForCascadingDelete" EventReasonWaitForCascadingDeleteError = "WaitForCascadingDeleteError" - SyncControllerName = "sync-controller" ) const ( @@ -82,23 +88,28 @@ const ( // If this finalizer is present on a cluster, the sync // controller will have the opportunity to perform per-deletion operations // (like deleting managed resources from member clusters). - FinalizerCascadingDeletePrefix = common.DefaultPrefix + "cascading-delete" + FinalizerCascadingDelete = common.DefaultPrefix + "cascading-delete" ) // SyncController synchronizes the state of federated resources // in the host cluster with resources in member clusters. type SyncController struct { - name string + worker worker.ReconcileWorker[common.QualifiedName] - worker worker.ReconcileWorker - clusterWorker worker.ReconcileWorker + // For handling cascading deletion. + clusterCascadingDeletionWorker worker.ReconcileWorker[common.QualifiedName] // For triggering reconciliation of all target resources. This is // used when a new cluster becomes available. 
- clusterQueue workqueue.DelayingInterface + clusterReadinessTransitionQueue workqueue.DelayingInterface + + fedClient fedclient.Interface - // Informer for resources in member clusters - informer util.FederatedInformer + ftcManager informermanager.FederatedTypeConfigManager + fedInformerManager informermanager.FederatedInformerManager + + // For accessing FederatedResources (logical federated objects) + fedAccessor FederatedResourceAccessor // For events eventRecorder record.EventRecorder @@ -111,78 +122,51 @@ type SyncController struct { ensureDeletionRecheckDelay time.Duration cascadingDeletionRecheckDelay time.Duration - typeConfig *fedcorev1a1.FederatedTypeConfig - - fedAccessor FederatedResourceAccessor - - hostClusterClient genericclient.Client - - controllerHistory history.Interface - - controllerRevisionStore cache.Store - - controllerRevisionController cache.Controller - - revListerSynced cache.InformerSynced - - limitedScope bool - - cascadingDeleteFinalizer string - metrics stats.Metrics logger klog.Logger } -// StartSyncController starts a new sync controller for a type config -func StartSyncController( - controllerConfig *util.ControllerConfig, - stopChan <-chan struct{}, - typeConfig *fedcorev1a1.FederatedTypeConfig, - fedNamespaceAPIResource *metav1.APIResource, - controllerRevisionStore cache.Store, - controllerRevisionController cache.Controller, -) error { - controller, err := newSyncController( - controllerConfig, - typeConfig, - fedNamespaceAPIResource, - controllerRevisionStore, - controllerRevisionController, - ) - if err != nil { - return err - } - if controllerConfig.MinimizeLatency { - controller.minimizeLatency() - } - controller.logger.Info("Starting sync controller") - controller.Run(stopChan) - return nil -} +/* +TODOs +- two cluster queues +- cluster event handlers +- federated accessor +- version manager +- fo and cfo +- managed label for federatedinformermanager +- already partially deleted revision history +- already partially deleted fed namespace placement constraint +- use jsonutil for unmarshalling +- check deepcopy, especially when previously used UnmarshalGeneric +- federated name and target name are not the same +- check if namespace are used for all clients +- generic client: + - version manager (have to experiment with generics, reconsider after refactoring everything else) + - dispatcher +*/ -// newSyncController returns a new sync controller for the configuration -func newSyncController( +// NewSyncController returns a new sync controller for the configuration +func NewSyncController( + logger klog.Logger, controllerConfig *util.ControllerConfig, - typeConfig *fedcorev1a1.FederatedTypeConfig, - fedNamespaceAPIResource *metav1.APIResource, - controllerRevisionStore cache.Store, - controllerRevisionController cache.Controller, -) (*SyncController, error) { - federatedTypeAPIResource := typeConfig.GetFederatedType() - userAgent := fmt.Sprintf("%s-federate-sync-controller", strings.ToLower(federatedTypeAPIResource.Kind)) - // Initialize non-dynamic clients first to avoid polluting config - client := genericclient.NewForConfigOrDieWithUserAgent(controllerConfig.KubeConfig, userAgent) - kubeClient := kubeclient.NewForConfigOrDie(controllerConfig.KubeConfig) + kubeClient kubernetes.Interface, + genericClient generic.Client, + fedClient fedclient.Interface, - configCopy := rest.CopyConfig(controllerConfig.KubeConfig) - rest.AddUserAgent(configCopy, userAgent) + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, + clusterFedObjectInformer 
fedcorev1a1informers.ClusterFederatedObjectInformer, - recorder := eventsink.NewDefederatingRecorderMux(kubeClient, userAgent, 4) - logger := klog.LoggerWithValues(klog.Background(), "controller", SyncControllerName, "ftc", typeConfig.Name) + ftcManager informermanager.FederatedTypeConfigManager, + fedInformerManager informermanager.FederatedInformerManager, +) (*SyncController, error) { + recorder := eventsink.NewDefederatingRecorderMux(kubeClient, SyncControllerName, 4) + logger = klog.LoggerWithValues(logger, "controller", SyncControllerName) s := &SyncController{ - name: userAgent, + fedClient: fedClient, + ftcManager: ftcManager, + fedInformerManager: fedInformerManager, clusterAvailableDelay: controllerConfig.ClusterAvailableDelay, clusterUnavailableDelay: controllerConfig.ClusterUnavailableDelay, reconcileOnClusterChangeDelay: time.Second * 3, @@ -191,133 +175,122 @@ func newSyncController( ensureDeletionRecheckDelay: time.Second * 5, cascadingDeletionRecheckDelay: time.Second * 10, eventRecorder: recorder, - typeConfig: typeConfig, - hostClusterClient: client, - limitedScope: controllerConfig.LimitedScope(), - controllerRevisionStore: controllerRevisionStore, - controllerRevisionController: controllerRevisionController, metrics: controllerConfig.Metrics, logger: logger, } - hash := fnv.New32() - _, err := hash.Write([]byte(s.typeConfig.GetObjectMeta().Name)) - if err != nil { - return nil, errors.Wrapf( - err, - "failed to generate cascading-delete finalizer for ftc %s", - s.typeConfig.GetObjectMeta().Name, - ) - } - ftcNameTruncated := s.typeConfig.GetObjectMeta().Name - if len(ftcNameTruncated) > 20 { - ftcNameTruncated = ftcNameTruncated[:20] - } - s.cascadingDeleteFinalizer = fmt.Sprintf("%s-%s-%d", FinalizerCascadingDeletePrefix, ftcNameTruncated, hash.Sum32()) - - s.worker = worker.NewReconcileWorker( + s.worker = worker.NewReconcileWorker[common.QualifiedName]( + SyncControllerName, + nil, s.reconcile, worker.RateLimiterOptions{}, controllerConfig.WorkerCount, controllerConfig.Metrics, - deliverutil.NewMetricTags("sync-worker", typeConfig.GetTargetType().Kind), ) - // TODO: do we need both clusterWorker and clusterQueue? - s.clusterWorker = worker.NewReconcileWorker(s.reconcileCluster, worker.RateLimiterOptions{}, 1, controllerConfig.Metrics, - deliverutil.NewMetricTags("sync-cluster-worker", typeConfig.GetTargetType().Kind)) + s.clusterCascadingDeletionWorker = worker.NewReconcileWorker[common.QualifiedName]( + SyncControllerName+"-cluster-cascading-deletion-worker", + nil, + s.reconcileClusterForCascadingDeletion, + worker.RateLimiterOptions{}, + 1, + controllerConfig.Metrics, + ) // Build queue for triggering cluster reconciliations. 
- s.clusterQueue = workqueue.NewNamedDelayingQueue("sync-controller-cluster-queue") - - targetAPIResource := typeConfig.GetTargetType() - - // Federated informer for resources in member clusters - s.informer, err = util.NewFederatedInformer( - controllerConfig, - client, - configCopy, - &targetAPIResource, - func(obj pkgruntime.Object) { - qualifiedName := common.NewQualifiedName(obj) - s.worker.EnqueueWithDelay(qualifiedName, s.memberObjectEnqueueDelay) + s.clusterReadinessTransitionQueue = workqueue.NewNamedDelayingQueue("sync-controller-cluster-readiness-transition-queue") + + if err := s.fedInformerManager.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{ + Predicate: informermanager.RegisterOncePredicate, + Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + return util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + obj := o.(*unstructured.Unstructured) + + ftc, exists := s.ftcManager.GetResourceFTC(obj.GroupVersionKind()) + if !exists { + return + } + + federatedName := common.QualifiedName{ + Namespace: obj.GetNamespace(), + Name: util.GenerateFederatedObjectName(obj.GetName(), ftc.GetName()), + } + s.worker.EnqueueWithDelay(federatedName, s.memberObjectEnqueueDelay) + }) }, - &util.ClusterLifecycleHandlerFuncs{ - ClusterAvailable: func(cluster *fedcorev1a1.FederatedCluster) { - // When new cluster becomes available process all the target resources again. - s.clusterWorker.EnqueueObject(cluster) - s.clusterQueue.AddAfter(struct{}{}, s.clusterAvailableDelay) + }); err != nil { + return nil, fmt.Errorf("failed to add event handler generator: %w", err) + } + + if err := s.fedInformerManager.AddClusterEventHandlers( + &informermanager.ClusterEventHandler{ + Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { + // Enqueue cluster when it's marked for deletion to ensure cascading deletion + return oldCluster != nil && newCluster != nil && + oldCluster.GetDeletionTimestamp().IsZero() && !newCluster.GetDeletionTimestamp().IsZero() }, - // When a cluster becomes unavailable process all the target resources again. 
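In the event handler generator above, a member-object event is mapped back to its owning federated object purely by name. A simplified illustration of that mapping (this is not the real util.GenerateFederatedObjectName, which must also respect the 253-character limit described by MaxFederatedObjectNameLength):

package main

import "fmt"

// federatedObjectName is a naive stand-in for util.GenerateFederatedObjectName:
// joining the source object name with its FTC name keeps same-named objects
// of different types from colliding on one federated object.
func federatedObjectName(sourceName, ftcName string) string {
	return fmt.Sprintf("%s-%s", sourceName, ftcName)
}

func main() {
	// A Deployment "nginx" and a ConfigMap "nginx" map to distinct names.
	fmt.Println(federatedObjectName("nginx", "deployments.apps")) // nginx-deployments.apps
	fmt.Println(federatedObjectName("nginx", "configmaps"))       // nginx-configmaps
}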
- ClusterUnavailable: func(cluster *fedcorev1a1.FederatedCluster, _ []interface{}) { - s.clusterWorker.EnqueueObject(cluster) - s.clusterQueue.AddAfter(struct{}{}, s.clusterUnavailableDelay) + Callback: func(cluster *fedcorev1a1.FederatedCluster) { + s.clusterCascadingDeletionWorker.Enqueue(common.NewQualifiedName(cluster)) }, }, - ) - if err != nil { - return nil, err - } - - s.fedAccessor, err = NewFederatedResourceAccessor( - logger, controllerConfig, typeConfig, fedNamespaceAPIResource, - client, s.worker.EnqueueObject, recorder) - if err != nil { - return nil, err + &informermanager.ClusterEventHandler{ + Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { + // Reconcile all federated objects when cluster readiness changes + return oldCluster != nil && newCluster != nil && + clusterutil.IsClusterReady(&oldCluster.Status) != clusterutil.IsClusterReady(&newCluster.Status) + }, + Callback: func(cluster *fedcorev1a1.FederatedCluster) { + s.clusterReadinessTransitionQueue.Add(struct{}{}) + }, + }, + ); err != nil { + return nil, fmt.Errorf("failed to add cluster event handler: %w", err) } - if typeConfig.GetRevisionHistoryEnabled() { - s.controllerHistory = history.NewHistory(kubeClient, controllerRevisionStore) - s.revListerSynced = controllerRevisionController.HasSynced - } + s.fedAccessor = NewFederatedResourceAccessor( + logger, controllerConfig, genericClient, + fedObjectInformer, clusterFedObjectInformer, + ftcManager, + func(qualifiedName common.QualifiedName) { + s.worker.Enqueue(qualifiedName) + }, + recorder, + ) return s, nil } -// minimizeLatency reduces delays and timeouts to make the controller more responsive (useful for testing). -func (s *SyncController) minimizeLatency() { - s.clusterAvailableDelay = time.Second - s.clusterUnavailableDelay = time.Second - s.reconcileOnClusterChangeDelay = 20 * time.Millisecond - s.memberObjectEnqueueDelay = 50 * time.Millisecond - s.recheckAfterDispatchDelay = 2 * time.Second - s.ensureDeletionRecheckDelay = 2 * time.Second - s.cascadingDeletionRecheckDelay = 3 * time.Second -} - -func (s *SyncController) Run(stopChan <-chan struct{}) { - s.fedAccessor.Run(stopChan) - s.informer.Start() +func (s *SyncController) Run(ctx context.Context) { + s.fedAccessor.Run(ctx.Done()) go func() { for { - _, shutdown := s.clusterQueue.Get() + _, shutdown := s.clusterReadinessTransitionQueue.Get() if shutdown { break } - s.reconcileOnClusterChange() + s.enqueueAllObjects() } }() - if !cache.WaitForNamedCacheSync(s.name, stopChan, s.HasSynced) { + if !cache.WaitForNamedCacheSync(SyncControllerName, ctx.Done(), s.HasSynced) { return } - s.worker.Run(stopChan) - s.clusterWorker.Run(stopChan) + s.worker.Run(ctx) + s.clusterCascadingDeletionWorker.Run(ctx) // Ensure all goroutines are cleaned up when the stop channel closes go func() { - <-stopChan - s.informer.Stop() - s.clusterQueue.ShutDown() + <-ctx.Done() + s.clusterReadinessTransitionQueue.ShutDown() }() } // Check whether all data stores are in sync. False is returned if any of the informer/stores is not yet // synced with the corresponding api server. 
func (s *SyncController) HasSynced() bool { - if !s.informer.ClustersSynced() { - s.logger.V(3).Info("Cluster list not synced") + if !s.fedInformerManager.HasSynced() { + s.logger.V(3).Info("FederatedInformerManager not synced") return false } if !s.fedAccessor.HasSynced() { @@ -326,48 +299,27 @@ func (s *SyncController) HasSynced() bool { return false } - if s.typeConfig.GetRevisionHistoryEnabled() && !s.revListerSynced() { - s.logger.V(3).Info("ControllerRevision list not synced") - return false - } - return true } +func (s *SyncController) getClusterClient(clusterName string) (dynamic.Interface, error) { + if client, exists := s.fedInformerManager.GetClusterClient(clusterName); exists { + return client, nil + } + return nil, fmt.Errorf("client does not exist for cluster") +} + // The function triggers reconciliation of all target federated resources. -func (s *SyncController) reconcileOnClusterChange() { - s.fedAccessor.VisitFederatedResources(func(obj interface{}) { - qualifiedName := common.NewQualifiedName(obj.(pkgruntime.Object)) +func (s *SyncController) enqueueAllObjects() { + s.fedAccessor.VisitFederatedResources(func(obj fedcorev1a1.GenericFederatedObject) { + qualifiedName := common.NewQualifiedName(obj) s.worker.EnqueueWithDelay(qualifiedName, s.reconcileOnClusterChangeDelay) }) } -func (s *SyncController) reconcile(qualifiedName common.QualifiedName) (status worker.Result) { - key := qualifiedName.String() - keyedLogger := s.logger.WithValues("object", key) - ctx := klog.NewContext(context.TODO(), keyedLogger) - fedResource, possibleOrphan, err := s.fedAccessor.FederatedResource(qualifiedName) - if err != nil { - keyedLogger.Error(err, "Failed to create FederatedResource helper") - return worker.StatusError - } - if possibleOrphan { - apiResource := s.typeConfig.GetTargetType() - gvk := schemautil.APIResourceToGVK(&apiResource) - keyedLogger.WithValues("label", managedlabel.ManagedByKubeAdmiralLabelKey). - V(2).Info("Ensuring the removal of the label in member clusters") - err = s.removeManagedLabel(ctx, gvk, qualifiedName) - if err != nil { - keyedLogger.WithValues("label", managedlabel.ManagedByKubeAdmiralLabelKey). 
- Error(err, "Failed to remove the label from object in member clusters") - return worker.StatusError - } - - return worker.StatusAllOK - } - if fedResource == nil { - return worker.StatusAllOK - } +func (s *SyncController) reconcile(ctx context.Context, federatedName common.QualifiedName) (status worker.Result) { + keyedLogger := s.logger.WithValues("federated-name", federatedName.String()) + ctx = klog.NewContext(ctx, keyedLogger) s.metrics.Rate("sync.throughput", 1) keyedLogger.V(3).Info("Starting to reconcile") @@ -377,6 +329,21 @@ func (s *SyncController) reconcile(qualifiedName common.QualifiedName) (status w keyedLogger.WithValues("duration", time.Since(startTime), "status", status).V(3).Info("Finished reconciling") }() + fedResource, err := s.fedAccessor.FederatedResource(federatedName) + if err != nil { + keyedLogger.Error(err, "Failed to create FederatedResource helper") + return worker.StatusError + } + if fedResource == nil { + return worker.StatusAllOK + } + + ctx, keyedLogger = logging.InjectLoggerValues( + ctx, + "target-name", fedResource.TargetName().String(), + "target-gvk", fedResource.TargetGVK().String(), + ) + if fedResource.Object().GetDeletionTimestamp() != nil { return s.ensureDeletion(ctx, fedResource) } @@ -400,59 +367,29 @@ func (s *SyncController) reconcile(qualifiedName common.QualifiedName) (status w return worker.StatusError } - var lastRevisionNameWithHash, currentRevisionName string - collisionCount := fedResource.CollisionCount() - if s.typeConfig.GetRevisionHistoryEnabled() { - keyedLogger.V(2).Info("Starting to sync revisions") - collisionCount, lastRevisionNameWithHash, currentRevisionName, err = s.syncRevisions(ctx, fedResource) - if err != nil { - keyedLogger.Error(err, "Failed to sync revisions") - fedResource.RecordError("SyncRevisionHistoryError", errors.Wrap(err, "Failed to sync revisions")) - return worker.StatusError - } - } - err = s.ensureAnnotations(ctx, fedResource, lastRevisionNameWithHash, currentRevisionName) - if err != nil { - if apierrors.IsConflict(err) { - return worker.StatusConflict - } - keyedLogger.Error(err, "Failed to ensure annotations") - fedResource.RecordError("EnsureAnnotationsErr", errors.Wrap(err, "Failed to ensure annotations")) - return worker.StatusError - } - - return s.syncToClusters(ctx, fedResource, collisionCount) + return s.syncToClusters(ctx, fedResource) } // syncToClusters ensures that the state of the given object is // synchronized to member clusters. 
-func (s *SyncController) syncToClusters(ctx context.Context, fedResource FederatedResource, collisionCount *int32) worker.Result { +func (s *SyncController) syncToClusters(ctx context.Context, fedResource FederatedResource) worker.Result { keyedLogger := klog.FromContext(ctx) - clusters, err := s.informer.GetJoinedClusters() + clusters, err := s.fedInformerManager.GetJoinedClusters() if err != nil { fedResource.RecordError( - string(fedtypesv1a1.ClusterRetrievalFailed), + string(fedcorev1a1.ClusterRetrievalFailed), errors.Wrap(err, "Failed to retrieve list of clusters"), ) - return s.setFederatedStatus(ctx, fedResource, collisionCount, fedtypesv1a1.ClusterRetrievalFailed, nil) + return s.setFederatedStatus(ctx, fedResource, fedcorev1a1.ClusterRetrievalFailed, nil) } - selectedClusterNames, err := fedResource.ComputePlacement(clusters) - if err != nil { - fedResource.RecordError( - string(fedtypesv1a1.ComputePlacementFailed), - errors.Wrap(err, "Failed to compute placement"), - ) - return s.setFederatedStatus(ctx, fedResource, collisionCount, fedtypesv1a1.ComputePlacementFailed, nil) - } - - keyedLogger.WithValues("clusters", strings.Join(selectedClusterNames.List(), ",")). - V(2).Info("Ensuring target object in clusters") + selectedClusterNames := fedResource.ComputePlacement(clusters) + keyedLogger.V(2).Info("Ensuring target object in clusters", "clusters", strings.Join(sets.List(selectedClusterNames), ",")) - skipAdoptingPreexistingResources := !util.ShouldAdoptPreexistingResources(fedResource.Object()) + skipAdoptingPreexistingResources := !adoption.ShouldAdoptPreexistingResources(fedResource.Object()) dispatcher := dispatch.NewManagedDispatcher( - s.informer.GetClientForCluster, + s.getClusterClient, fedResource, skipAdoptingPreexistingResources, s.metrics, @@ -465,27 +402,35 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat isCascadingDeletionTriggered := cluster.GetDeletionTimestamp() != nil && util.IsCascadingDeleteEnabled(cluster) shouldBeDeleted := !isSelectedCluster || isCascadingDeletionTriggered - if !util.IsClusterReady(&cluster.Status) { + if !clusterutil.IsClusterReady(&cluster.Status) { if !shouldBeDeleted { // Cluster state only needs to be reported in resource // status for clusters where the object should not be deleted. err := errors.New("Cluster not ready") - dispatcher.RecordClusterError(fedtypesv1a1.ClusterNotReady, clusterName, err) + dispatcher.RecordClusterError(fedcorev1a1.ClusterNotReady, clusterName, err) } continue } - clusterObj, _, err := util.GetClusterObject( - ctx, - s.informer, - clusterName, - fedResource.TargetName(), - s.typeConfig.GetTargetType(), - ) - if err != nil { - wrappedErr := errors.Wrap(err, "failed to get cluster object") - dispatcher.RecordClusterError(fedtypesv1a1.CachedRetrievalFailed, clusterName, wrappedErr) - continue + var clusterObj *unstructured.Unstructured + { + // TODO: updating the sync status may thrash the host apiserver if the host caches are synced but member caches are not synced. + // Find out if this is ok. 
+ lister, hasSynced, exists := s.fedInformerManager.GetResourceLister(fedResource.TargetGVK(), clusterName) + if !exists || !hasSynced() { + wrappedErr := fmt.Errorf("cluster cache is not synced") + dispatcher.RecordClusterError(fedcorev1a1.CachedRetrievalFailed, clusterName, wrappedErr) + continue + } + + clusterObjAny, err := lister.Get(fedResource.TargetName().String()) + if err != nil || clusterObjAny == nil { + wrappedErr := fmt.Errorf("failed to get cluster object: %w", err) + dispatcher.RecordClusterError(fedcorev1a1.CachedRetrievalFailed, clusterName, wrappedErr) + continue + } + + clusterObj = clusterObjAny.(*unstructured.Unstructured) } // Resource should not exist in the named cluster @@ -496,7 +441,7 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat } if clusterObj.GetDeletionTimestamp() != nil { // Resource is marked for deletion - dispatcher.RecordStatus(clusterName, fedtypesv1a1.WaitingForRemoval) + dispatcher.RecordStatus(clusterName, fedcorev1a1.WaitingForRemoval) continue } if cluster.GetDeletionTimestamp() != nil && !util.IsCascadingDeleteEnabled(cluster) { @@ -505,7 +450,7 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat // This could happen right after a cluster is deleted: // the scheduler observes the cluster deletion and removes // the placement, while the sync controller's informer is - // lagging behind and sees a terminating cluster. + // lagging behind and observes a terminating cluster. continue } @@ -518,25 +463,25 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat if cluster.GetDeletionTimestamp() != nil { // if the cluster is terminating, we should not sync dispatcher.RecordClusterError( - fedtypesv1a1.ClusterTerminating, + fedcorev1a1.ClusterTerminating, clusterName, errors.New("Cluster terminating"), ) continue } - hasFinalizer, err := finalizersutil.HasFinalizer(cluster, s.cascadingDeleteFinalizer) + hasFinalizer, err := finalizersutil.HasFinalizer(cluster, FinalizerCascadingDelete) if err != nil { shouldRecheckAfterDispatch = true - dispatcher.RecordClusterError(fedtypesv1a1.FinalizerCheckFailed, clusterName, err) + dispatcher.RecordClusterError(fedcorev1a1.FinalizerCheckFailed, clusterName, err) continue } if !hasFinalizer { // we should not sync before finalizer is added shouldRecheckAfterDispatch = true dispatcher.RecordClusterError( - fedtypesv1a1.FinalizerCheckFailed, + fedcorev1a1.FinalizerCheckFailed, clusterName, - errors.Errorf("Missing cluster finalizer %s", s.cascadingDeleteFinalizer), + errors.Errorf("Missing cluster finalizer %s", FinalizerCascadingDelete), ) continue } @@ -557,19 +502,9 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat return worker.StatusError } - if dispatchOk { - err := s.updateSyncSuccessAnnotations(ctx, fedResource) - if err != nil { - if apierrors.IsConflict(err) { - return worker.StatusConflict - } - return worker.StatusError - } - } - // Write updated versions to the API. 
updatedVersionMap := dispatcher.VersionMap() - err = fedResource.UpdateVersions(selectedClusterNames.List(), updatedVersionMap) + err = fedResource.UpdateVersions(sets.List(selectedClusterNames), updatedVersionMap) if err != nil { // Versioning of federated resources is an optimization to // avoid unnecessary updates, and failure to record version @@ -581,8 +516,7 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat if reconcileStatus := s.setFederatedStatus( ctx, fedResource, - collisionCount, - fedtypesv1a1.AggregateSuccess, + fedcorev1a1.AggregateSuccess, &collectedStatus, ); reconcileStatus != worker.StatusAllOK { return reconcileStatus @@ -599,47 +533,11 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat return worker.StatusAllOK } -func (s *SyncController) updateSyncSuccessAnnotations(ctx context.Context, fedResource FederatedResource) error { - // Update SyncSuccessTimestamp annotation to federated resource. - obj := fedResource.Object() - annotations := obj.GetAnnotations() - generation := obj.GetGeneration() - updateAnnotation := true - federatedKeyLogger := klog.FromContext(ctx) - - if v, ok := annotations[annotationutil.LastSyncSuccessGeneration]; ok { - if strconv.FormatInt(generation, 10) == v { - updateAnnotation = false - } - } - - if updateAnnotation { - _, err := annotationutil.AddAnnotation( - obj, - annotationutil.LastSyncSuccessGeneration, - strconv.FormatInt(generation, 10), - ) - if err != nil { - return err - } - - syncSuccessTimestamp := metav1.Now().UTC().Format(time.RFC3339Nano) - _, err = annotationutil.AddAnnotation(obj, annotationutil.SyncSuccessTimestamp, syncSuccessTimestamp) - if err != nil { - return err - } - - err = s.hostClusterClient.Update(ctx, obj) - if err != nil { - federatedKeyLogger.Error(err, "Failed to update syncSuccessTimestamp annotation of federated object") - return err - } - } - return nil -} - -func (s *SyncController) setFederatedStatus(ctx context.Context, fedResource FederatedResource, collisionCount *int32, - reason fedtypesv1a1.AggregateReason, collectedStatus *status.CollectedPropagationStatus, +func (s *SyncController) setFederatedStatus( + ctx context.Context, + fedResource FederatedResource, + reason fedcorev1a1.FederatedObjectConditionReason, + collectedStatus *status.CollectedPropagationStatus, ) worker.Result { if collectedStatus == nil { collectedStatus = &status.CollectedPropagationStatus{} @@ -648,33 +546,22 @@ func (s *SyncController) setFederatedStatus(ctx context.Context, fedResource Fed obj := fedResource.Object() keyedLogger := klog.FromContext(ctx) - // Only a single reason for propagation failure is reported at any one time, so only report - // NamespaceNotFederated if no other explicit error has been indicated. - if reason == fedtypesv1a1.AggregateSuccess { - // For a cluster-scoped control plane, report when the containing namespace of a federated - // resource is not federated. The KubeAdmiral system namespace is implicitly federated in a - // namespace-scoped control plane. - if !s.limitedScope && fedResource.NamespaceNotFederated() { - reason = fedtypesv1a1.NamespaceNotFederated - } - } - // If the underlying resource has changed, attempt to retrieve and // update it repeatedly. 
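The poll loop that follows is the standard update-with-conflict-refresh pattern: attempt the status update, and on a 409 Conflict re-fetch the object so the next attempt works from a fresh copy. A generic, self-contained sketch of the pattern (get and update are stand-ins for the typed client calls, and the string payload stands in for the federated object):

package main

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
)

// updateWithConflictRefresh retries update until it succeeds or the poll
// times out, re-fetching the object whenever the apiserver reports a conflict.
func updateWithConflictRefresh(
	ctx context.Context,
	obj string,
	get func(ctx context.Context) (string, error),
	update func(ctx context.Context, obj string) (string, error),
) error {
	return wait.PollImmediateWithContext(ctx, 1*time.Second, 5*time.Second, func(ctx context.Context) (bool, error) {
		updated, err := update(ctx, obj)
		if err == nil {
			obj = updated
			return true, nil
		}
		if apierrors.IsConflict(err) {
			if obj, err = get(ctx); err != nil {
				return false, fmt.Errorf("failed to retrieve resource: %w", err)
			}
			return false, nil // retry with the refreshed object
		}
		return false, err
	})
}

func main() {
	err := updateWithConflictRefresh(
		context.Background(),
		"obj",
		func(ctx context.Context) (string, error) { return "obj", nil },
		func(ctx context.Context, obj string) (string, error) { return obj, nil },
	)
	fmt.Println(err) // <nil>
}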
- err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) { - if updateRequired, err := status.SetFederatedStatus(obj, collisionCount, reason, *collectedStatus); err != nil { - return false, errors.Wrapf(err, "failed to set the status") - } else if !updateRequired { + err := wait.PollImmediateWithContext(ctx, 1*time.Second, 5*time.Second, func(ctx context.Context) (bool, error) { + if updateRequired := status.SetFederatedStatus(obj, reason, *collectedStatus); !updateRequired { keyedLogger.V(4).Info("No status update necessary") return true, nil } - err := s.hostClusterClient.UpdateStatus(context.TODO(), obj) + var err error + obj, err = fedobjectadapters.UpdateStatus(ctx, s.fedClient.CoreV1alpha1(), obj, metav1.UpdateOptions{}) if err == nil { + fedResource.SetObject(obj) return true, nil } if apierrors.IsConflict(err) { - err := s.hostClusterClient.Get(context.TODO(), obj, obj.GetNamespace(), obj.GetName()) + obj, err = fedobjectadapters.Get(ctx, s.fedClient.CoreV1alpha1(), obj.GetNamespace(), obj.GetName(), metav1.GetOptions{}) if err != nil { return false, errors.Wrapf(err, "failed to retrieve resource") } @@ -687,95 +574,28 @@ func (s *SyncController) setFederatedStatus(ctx context.Context, fedResource Fed return worker.StatusError } - // UpdateStatus does not read the annotations, only the status field. - // Update reads the annotations, but it will bump the generation if status is changed. - // Therefore, we have to separate status update and annotation update into two separate calls. - - err = wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) { - changed := false - err := sourcefeedback.PopulateSyncingAnnotation(obj, collectedStatus.StatusMap, &changed) - if err != nil { - return false, err - } - - if !changed { - return true, nil - } - - err = s.hostClusterClient.Update(context.TODO(), obj) - if err == nil { - return true, nil - } - - if apierrors.IsConflict(err) { - err := s.hostClusterClient.Get(context.TODO(), obj, obj.GetNamespace(), obj.GetName()) - if err != nil { - return false, errors.Wrapf(err, "failed to retrieve resource") - } - return false, nil - } - return false, errors.Wrapf(err, "failed to update resource") - }) - if err != nil { - keyedLogger.Error(err, "Failed to update syncing annotation") - return worker.StatusError - } - return worker.StatusAllOK } func (s *SyncController) ensureDeletion(ctx context.Context, fedResource FederatedResource) worker.Result { fedResource.DeleteVersions() - key := fedResource.FederatedName().String() - kind := fedResource.FederatedKind() keyedLogger := klog.FromContext(ctx) - keyedLogger.V(2).Info("Ensuring deletion of federated object") obj := fedResource.Object() finalizers := sets.NewString(obj.GetFinalizers()...) if !finalizers.Has(FinalizerSyncController) { - keyedLogger.WithValues("finalizer-name", FinalizerSyncController). - V(3).Info("Federated object does not have the finalizer. Nothing to do") - return worker.StatusAllOK - } - - if util.GetOrphaningBehavior(obj) == util.OrphanManagedResourcesAll { - keyedLogger.WithValues("orphaning-behavior", util.OrphanManagedResourcesAll). - V(2).Info("Removing the finalizer") - err := s.deleteHistory(fedResource) - if err != nil { - keyedLogger.Error(err, "Failed to delete history for federated object") - return worker.StatusError - } - err = s.removeFinalizer(ctx, fedResource) - if err != nil { - if apierrors.IsConflict(err) { - return worker.StatusConflict - } - keyedLogger.WithValues("finalizer-name", FinalizerSyncController). 
- Error(err, "Failed to remove finalizer for federated object") - return worker.StatusError - } - keyedLogger.WithValues("label-name", managedlabel.ManagedByKubeAdmiralLabelKey). - V(2).Info("Removing managed label from resources previously managed by this federated object") - err = s.removeManagedLabel(ctx, fedResource.TargetGVK(), fedResource.TargetName()) - if err != nil { - keyedLogger.WithValues("label-name", managedlabel.ManagedByKubeAdmiralLabelKey). - Error(err, "Failed to remove the label from all resources previously managed by this federated object") - return worker.StatusError - } + keyedLogger.V(3).Info("Federated object does not have the finalizer. Nothing to do", "finalizer-name", FinalizerSyncController) return worker.StatusAllOK } keyedLogger.V(2).Info("Deleting resources managed by this federated object from member clusters") recheckRequired, err := s.deleteFromClusters(ctx, fedResource) if err != nil { - wrappedErr := errors.Wrapf(err, "failed to delete %s %q", kind, key) - fedResource.RecordError(string(fedtypesv1a1.EnsureDeletionFailed), wrappedErr) - keyedLogger.Error(err, "Failed to delete federated object") + fedResource.RecordError(string(fedcorev1a1.EnsureDeletionFailed), err) + keyedLogger.Error(err, "Failed to ensure deletion of member objects") return worker.StatusError } if recheckRequired { @@ -785,41 +605,12 @@ func (s *SyncController) ensureDeletion(ctx context.Context, fedResource Federat if apierrors.IsConflict(err) { return worker.StatusConflict } - keyedLogger.WithValues("finalizer-name", FinalizerSyncController). - Error(err, "Failed to remove finalizer from the federated object") + keyedLogger.Error(err, "Failed to remove finalizer from the federated object", "finalizer-name", FinalizerSyncController) return worker.StatusError } return worker.StatusAllOK } -// removeManagedLabel attempts to remove the managed label from -// resources with the given name in member clusters. -func (s *SyncController) removeManagedLabel( - ctx context.Context, - gvk schema.GroupVersionKind, - qualifiedName common.QualifiedName, -) error { - ok, err := s.handleDeletionInClusters( - ctx, - gvk, - qualifiedName, - func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured) { - if clusterObj.GetDeletionTimestamp() != nil { - return - } - - dispatcher.RemoveManagedLabel(ctx, clusterName, clusterObj) - }, - ) - if err != nil { - return err - } - if !ok { - return errors.Errorf("failed to remove the label from resources in one or more clusters.") - } - return nil -} - func (s *SyncController) deleteFromCluster( ctx context.Context, dispatcher dispatch.UnmanagedDispatcher, @@ -835,9 +626,9 @@ func (s *SyncController) deleteFromCluster( keyedLogger := klog.FromContext(ctx) // Respect orphaning behavior - orphaningBehavior := util.GetOrphaningBehavior(fedResource.Object()) - shouldBeOrphaned := orphaningBehavior == util.OrphanManagedResourcesAll || - orphaningBehavior == util.OrphanManagedResourcesAdopted && util.HasAdoptedAnnotation(clusterObj) + orphaningBehavior := orphaning.GetOrphaningBehavior(fedResource.Object()) + shouldBeOrphaned := orphaningBehavior == orphaning.OrphanManagedResourcesAll || + orphaningBehavior == orphaning.OrphanManagedResourcesAdopted && adoption.HasAdoptedAnnotation(clusterObj) if shouldBeOrphaned { keyedLogger.WithValues("cluster-name", clusterName). 
V(2).Info("Cluster object is going to be orphaned") @@ -877,11 +668,8 @@ func (s *SyncController) deleteFromClusters(ctx context.Context, fedResource Fed if err != nil { return false, errors.Wrapf(err, "failed to verify that managed resources no longer exist in any cluster") } - // Managed resources no longer exist in any member cluster - if err := s.deleteHistory(fedResource); err != nil { - return false, err - } + // Managed resources no longer exist in any member cluster return false, nil } @@ -891,19 +679,19 @@ func (s *SyncController) deleteFromClusters(ctx context.Context, fedResource Fed // the informer to cover the possibility that the resources have not // yet been cached. func (s *SyncController) ensureRemovedOrUnmanaged(ctx context.Context, fedResource FederatedResource) error { - clusters, err := s.informer.GetJoinedClusters() + clusters, err := s.fedInformerManager.GetJoinedClusters() if err != nil { return errors.Wrap(err, "failed to get a list of clusters") } dispatcher := dispatch.NewCheckUnmanagedDispatcher( - s.informer.GetClientForCluster, - fedResource.TargetGVK(), + s.getClusterClient, + fedResource.TargetGVR(), fedResource.TargetName(), ) unreadyClusters := []string{} for _, cluster := range clusters { - if !util.IsClusterReady(&cluster.Status) { + if !clusterutil.IsClusterReady(&cluster.Status) { unreadyClusters = append(unreadyClusters, cluster.Name) continue } @@ -926,33 +714,40 @@ func (s *SyncController) ensureRemovedOrUnmanaged(ctx context.Context, fedResour // each managed resource in member clusters. func (s *SyncController) handleDeletionInClusters( ctx context.Context, - gvk schema.GroupVersionKind, - qualifiedName common.QualifiedName, + targetGVK schema.GroupVersionKind, + targetQualifiedName common.QualifiedName, deletionFunc func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured), ) (bool, error) { - clusters, err := s.informer.GetJoinedClusters() + keyedLogger := klog.FromContext(ctx) + + clusters, err := s.fedInformerManager.GetJoinedClusters() if err != nil { - return false, errors.Wrap(err, "failed to get a list of clusters") + return false, fmt.Errorf("failed to get a list of clusters: %w", err) } - keyedLogger := klog.FromContext(ctx) - dispatcher := dispatch.NewUnmanagedDispatcher(s.informer.GetClientForCluster, gvk, qualifiedName) + ftc, exists := s.ftcManager.GetResourceFTC(targetGVK) + if !exists { + return false, fmt.Errorf("FTC does not exist") + } + + dispatcher := dispatch.NewUnmanagedDispatcher(s.getClusterClient, ftc.GetSourceTypeGVR(), targetQualifiedName) retrievalFailureClusters := []string{} unreadyClusters := []string{} for _, cluster := range clusters { clusterName := cluster.Name - if !util.IsClusterReady(&cluster.Status) { + if !clusterutil.IsClusterReady(&cluster.Status) { unreadyClusters = append(unreadyClusters, clusterName) continue } - clusterObj, _, err := util.GetClusterObject( - context.TODO(), - s.informer, + clusterObj, _, err := informermanager.GetClusterObject( + ctx, + s.ftcManager, + s.fedInformerManager, clusterName, - qualifiedName, - s.typeConfig.GetTargetType(), + targetQualifiedName, + targetGVK, ) if err != nil { keyedLogger.WithValues("cluster-name", clusterName). 
@@ -983,134 +778,110 @@ func (s *SyncController) handleDeletionInClusters( } func (s *SyncController) ensureFinalizer(ctx context.Context, fedResource FederatedResource) error { + ctx, keyedLogger := logging.InjectLoggerValues(ctx, "finalizer-name", FinalizerSyncController) + obj := fedResource.Object() isUpdated, err := finalizersutil.AddFinalizers(obj, sets.NewString(FinalizerSyncController)) - keyedLogger := klog.FromContext(ctx) if err != nil || !isUpdated { return err } - keyedLogger.WithValues("finalizer-name", FinalizerSyncController). - V(1).Info("Adding finalizer to federated object") - return s.hostClusterClient.Update(context.TODO(), obj) -} - -func (s *SyncController) ensureAnnotations( - ctx context.Context, - fedResource FederatedResource, - lastRevision, currentRevision string, -) error { - obj := fedResource.Object().DeepCopy() - updated := false - keyedLogger := klog.FromContext(ctx) - - // ensure last revision annotation - if len(lastRevision) != 0 { - revisionUpdated, err := annotationutil.AddAnnotation(obj, common.LastRevisionAnnotation, lastRevision) - if err != nil { - return err - } - updated = updated || revisionUpdated - } - // ensure current revision annotation - if len(currentRevision) != 0 { - revisionUpdated, err := annotationutil.AddAnnotation(obj, common.CurrentRevisionAnnotation, currentRevision) - if err != nil { - return err - } - updated = updated || revisionUpdated - } - - if !updated { - return nil - } - - keyedLogger.WithValues("last-revision-annotation-name", common.LastRevisionAnnotation, - "current-revision-annotation-name", common.CurrentRevisionAnnotation). - V(1).Info("Adding Latest Revision Annotation and Current Revision Annotation to federated object") - if err := s.hostClusterClient.Update(context.TODO(), obj); err != nil { + keyedLogger.V(1).Info("Adding finalizer to federated object") + updatedObj, err := fedobjectadapters.Update( + ctx, + s.fedClient.CoreV1alpha1(), + obj, + metav1.UpdateOptions{}, + ) + if err != nil { return err } + fedResource.SetObject(updatedObj) return nil } func (s *SyncController) removeFinalizer(ctx context.Context, fedResource FederatedResource) error { - keyedLogger := klog.FromContext(ctx) + ctx, keyedLogger := logging.InjectLoggerValues(ctx, "finalizer-name", FinalizerSyncController) + obj := fedResource.Object() isUpdated, err := finalizersutil.RemoveFinalizers(obj, sets.NewString(FinalizerSyncController)) if err != nil || !isUpdated { return err } - keyedLogger.WithValues("finalizer-name", FinalizerSyncController). - V(1).Info("Removing finalizer from federated object") - return s.hostClusterClient.Update(context.TODO(), obj) -} -func (s *SyncController) deleteHistory(fedResource FederatedResource) error { - return s.hostClusterClient.DeleteHistory(context.TODO(), fedResource.Object()) + keyedLogger.V(1).Info("Removing finalizer from federated object") + updatedObj, err := fedobjectadapters.Update( + ctx, + s.fedClient.CoreV1alpha1(), + obj, + metav1.UpdateOptions{}, + ) + if err != nil { + return err + } + + fedResource.SetObject(updatedObj) + return nil } func (s *SyncController) ensureClusterFinalizer(ctx context.Context, cluster *fedcorev1a1.FederatedCluster) error { - clusteredKeyedLogger := klog.FromContext(ctx) - clusteredKeyedLogger.WithValues("finalizer-name", s.cascadingDeleteFinalizer). 
- V(1).Info("Adding finalizer to cluster") + ctx, keyedLogger := logging.InjectLoggerValues(ctx, "finalizer-name", FinalizerCascadingDelete) + keyedLogger.V(1).Info("Adding finalizer to cluster") if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := s.hostClusterClient.Get(context.TODO(), cluster, cluster.Namespace, cluster.Name); err != nil { + var err error + cluster, err = s.fedClient.CoreV1alpha1().FederatedClusters().Get(ctx, cluster.Name, metav1.GetOptions{ResourceVersion: "0"}) + if err != nil { return err } - isUpdated, err := finalizersutil.AddFinalizers(cluster, sets.NewString(s.cascadingDeleteFinalizer)) + isUpdated, err := finalizersutil.AddFinalizers(cluster, sets.NewString(FinalizerCascadingDelete)) if err != nil || !isUpdated { return err } - return s.hostClusterClient.Update(context.TODO(), cluster) + cluster, err = s.fedClient.CoreV1alpha1().FederatedClusters().Update(ctx, cluster, metav1.UpdateOptions{}) + return err }); err != nil { - return errors.Wrapf( - err, - "failed to ensure finalizer %s from cluster %q", - s.cascadingDeleteFinalizer, - cluster.Name, - ) + keyedLogger.Error(err, "Failed to ensure cluster finalizer") + return err } return nil } func (s *SyncController) removeClusterFinalizer(ctx context.Context, cluster *fedcorev1a1.FederatedCluster) error { - keyedLogger := klog.FromContext(ctx) - keyedLogger.WithValues("finalizer-name", s.cascadingDeleteFinalizer). - V(1).Info("Removing finalizer from cluster") + ctx, keyedLogger := logging.InjectLoggerValues(ctx, "finalizer-name", FinalizerCascadingDelete) + keyedLogger.V(1).Info("Removing finalizer from cluster") if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := s.hostClusterClient.Get(context.TODO(), cluster, cluster.Namespace, cluster.Name); err != nil { + var err error + cluster, err = s.fedClient.CoreV1alpha1().FederatedClusters().Get(ctx, cluster.Name, metav1.GetOptions{ResourceVersion: "0"}) + if err != nil { return err } - isUpdated, err := finalizersutil.RemoveFinalizers(cluster, sets.NewString(s.cascadingDeleteFinalizer)) + isUpdated, err := finalizersutil.RemoveFinalizers(cluster, sets.NewString(FinalizerCascadingDelete)) if err != nil || !isUpdated { return err } - return s.hostClusterClient.Update(context.TODO(), cluster) + cluster, err = s.fedClient.CoreV1alpha1().FederatedClusters().Update(ctx, cluster, metav1.UpdateOptions{}) + return err }); err != nil { - return errors.Wrapf( - err, - "failed to remove finalizer %s from cluster %q", - s.cascadingDeleteFinalizer, - cluster.Name, - ) + keyedLogger.Error(err, "Failed to remove cluster finalizer") + return err } return nil } -func (s *SyncController) reconcileCluster(qualifiedName common.QualifiedName) worker.Result { - logger := s.logger.WithValues("cluster-name", qualifiedName.String()) - ctx := klog.NewContext(context.TODO(), logger) +func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Context, qualifiedName common.QualifiedName) worker.Result { + logger := s.logger.WithValues("cluster-name", qualifiedName.String(), "process", "cluster-cascading-deletion") + ctx = klog.NewContext(ctx, logger) - cluster, found, err := s.informer.GetCluster(qualifiedName.Name) + clusterLister := s.fedInformerManager.GetFederatedClusterLister() + cluster, err := clusterLister.Get(qualifiedName.Name) + if apierrors.IsNotFound(err) { + return worker.StatusAllOK + } if err != nil { logger.Error(err, "Failed to get federated cluster") return worker.StatusError } - if !found { - return 
worker.StatusAllOK
-	}
 
 	cluster = cluster.DeepCopy()
 	if cluster.DeletionTimestamp == nil {
@@ -1120,20 +891,18 @@ func (s *SyncController) reconcileCluster(qualifiedName common.QualifiedName) wo
 			if apierrors.IsConflict(err) {
 				return worker.StatusConflict
 			}
-			logger.Error(err, "Failed to ensure cluster finalizer")
 			return worker.StatusError
 		}
 
 		return worker.StatusAllOK
 	}
 
-	if !util.IsClusterJoined(&cluster.Status) || !util.IsCascadingDeleteEnabled(cluster) {
+	if !clusterutil.IsClusterJoined(&cluster.Status) || !util.IsCascadingDeleteEnabled(cluster) {
 		// cascading-delete is not required, remove cascading-delete finalizer immediately
 		err := s.removeClusterFinalizer(ctx, cluster)
 		if err != nil {
 			if apierrors.IsConflict(err) {
 				return worker.StatusConflict
 			}
-			logger.Error(err, "Failed to remove cluster finalizer")
 			return worker.StatusError
 		}
 
@@ -1141,49 +910,65 @@ func (s *SyncController) reconcileCluster(qualifiedName common.QualifiedName) wo
 	}
 
 	// cascading-delete is enabled, wait for member objects to be deleted
-	client, err := s.informer.GetClientForCluster(cluster.Name)
+	ftcLister := s.ftcManager.GetFederatedTypeConfigLister()
+	ftcs, err := ftcLister.List(labels.Everything())
 	if err != nil {
-		s.eventRecorder.Eventf(
-			cluster,
-			corev1.EventTypeWarning,
-			EventReasonWaitForCascadingDeleteError,
-			"unable to get cluster client: cluster is not available (check cluster conditions): %v",
-			err,
-		)
-		logger.Error(err, "Failed to get cluster client")
+		logger.Error(err, "Failed to list FederatedTypeConfigs")
 		return worker.StatusError
 	}
 
-	// we need to do an actual list because federated informer returns an empty list by default
-	// if the cluster is unavailable
-	targetType := s.typeConfig.GetTargetType()
-	objects := &unstructured.UnstructuredList{}
-	objects.SetGroupVersionKind(schemautil.APIResourceToGVK(&targetType))
-	err = client.ListWithOptions(
-		context.TODO(),
-		objects,
-		runtimeclient.Limit(1),
-		runtimeclient.InNamespace(corev1.NamespaceAll),
-		runtimeclient.MatchingLabels{
-			managedlabel.ManagedByKubeAdmiralLabelKey: managedlabel.ManagedByKubeAdmiralLabelValue,
-		},
-	)
-	if err == nil && len(objects.Items) > 0 {
+	remainingByGVK := make(map[string]string, len(ftcs))
+	for _, ftc := range ftcs {
+		gvk := ftc.GetSourceTypeGVK().String()
+		resourceLister, hasSynced, exists := s.fedInformerManager.GetResourceLister(ftc.GetSourceTypeGVK(), cluster.Name)
+		if !exists {
+			continue
+		}
+
+		// If cluster cache is synced, we check the store.
+		// Otherwise, we will have to issue a list request.
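+		// Listing from the synced store is cheap, so the full remaining count is
+		// reported there; the live list below is capped at one item, since a
+		// single match is enough to block the cascading delete.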
+		if hasSynced() {
+			objects, err := resourceLister.List(labels.Everything())
+			if err != nil {
+				remainingByGVK[gvk] = fmt.Sprintf("Unknown (failed to list from cluster lister: %v)", err)
+			} else if len(objects) > 0 {
+				remainingByGVK[gvk] = strconv.Itoa(len(objects))
+			}
+		} else {
+			client, exists := s.fedInformerManager.GetClusterClient(cluster.Name)
+			if !exists {
+				remainingByGVK[gvk] = "Unknown (cluster client does not exist)"
+				continue
+			}
+
+			objects, err := client.Resource(ftc.GetSourceTypeGVR()).Namespace(corev1.NamespaceAll).List(
+				ctx, metav1.ListOptions{
+					Limit: 1,
+					LabelSelector: labels.SelectorFromSet(labels.Set{
+						managedlabel.ManagedByKubeAdmiralLabelKey: managedlabel.ManagedByKubeAdmiralLabelValue,
+					}).String(),
+				},
+			)
+			if err == nil && len(objects.Items) > 0 {
+				remainingByGVK[gvk] = strconv.Itoa(len(objects.Items))
+			} else if err != nil && !meta.IsNoMatchError(err) {
+				remainingByGVK[gvk] = fmt.Sprintf("Unknown (failed to list from cluster: %v)", err)
+			}
+		}
+	}
+
+	if len(remainingByGVK) > 0 {
 		s.eventRecorder.Eventf(
 			cluster,
 			corev1.EventTypeNormal,
 			EventReasonWaitForCascadingDelete,
-			"waiting for cascading delete of %s",
-			s.typeConfig.GetTargetType().Name,
+			"waiting for cascading delete: %v",
+			remainingByGVK,
 		)
 		return worker.Result{RequeueAfter: &s.cascadingDeletionRecheckDelay}
 	}
 
-	if err != nil && !meta.IsNoMatchError(err) {
-		logger.Error(err, "Failed to list target objects from cluster")
-		return worker.StatusError
-	}
-
-	// either all member objects are deleted or the resource does not exist, remove finalizer
+	// all member objects are deleted
 	err = s.removeClusterFinalizer(ctx, cluster)
 	if err != nil {
 		logger.Error(err, "Failed to remove cluster finalizer")
diff --git a/pkg/controllers/sync/dispatch/checkunmanaged.go b/pkg/controllers/sync/dispatch/checkunmanaged.go
index b0a3a894..859c552d 100644
--- a/pkg/controllers/sync/dispatch/checkunmanaged.go
+++ b/pkg/controllers/sync/dispatch/checkunmanaged.go
@@ -26,14 +26,15 @@ import (
 	"fmt"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
 	"k8s.io/klog/v2"
 
-	"github.com/kubewharf/kubeadmiral/pkg/client/generic"
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/util"
-	"github.com/kubewharf/kubeadmiral/pkg/controllers/util/managedlabel"
+	"github.com/kubewharf/kubeadmiral/pkg/util/managedlabel"
 )
 
 type CheckUnmanagedDispatcher interface {
@@ -45,19 +46,19 @@ type CheckUnmanagedDispatcher interface {
 type checkUnmanagedDispatcherImpl struct {
 	dispatcher *operationDispatcherImpl
 
-	targetGVK  schema.GroupVersionKind
+	targetGVR  schema.GroupVersionResource
 	targetName common.QualifiedName
 }
 
 func NewCheckUnmanagedDispatcher(
 	clientAccessor clientAccessorFunc,
-	targetGVK schema.GroupVersionKind,
+	targetGVR schema.GroupVersionResource,
 	targetName common.QualifiedName,
 ) CheckUnmanagedDispatcher {
 	dispatcher := newOperationDispatcher(clientAccessor, nil)
 	return &checkUnmanagedDispatcherImpl{
 		dispatcher: dispatcher,
-		targetGVK:  targetGVK,
+		targetGVR:  targetGVR,
 		targetName: targetName,
 	}
 }
@@ -73,15 +74,17 @@ func (d *checkUnmanagedDispatcherImpl) CheckRemovedOrUnlabeled(ctx context.Conte
 	d.dispatcher.incrementOperationsInitiated()
 	const op = "check for deletion of resource or removal of managed label from"
 	errLogMessage := fmt.Sprintf("Failed to %s
target obj", op) - go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client generic.Client) bool { + go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client dynamic.Interface) bool { keyedLogger := klog.FromContext(ctx).WithValues("cluster-name", clusterName) targetName := d.targetNameForCluster(clusterName) keyedLogger.V(2).Info("Checking for deletion of resource or removal of managed label from target obj") - clusterObj := &unstructured.Unstructured{} - clusterObj.SetGroupVersionKind(d.targetGVK) - err := client.Get(context.Background(), clusterObj, targetName.Namespace, targetName.Name) - if apierrors.IsNotFound(err) { + clusterObj, err := client.Resource(d.targetGVR).Namespace(targetName.Namespace).Get( + ctx, + targetName.Name, + metav1.GetOptions{ResourceVersion: "0"}, + ) + if apierrors.IsNotFound(err) || meta.IsNoMatchError(err) { return true } if err != nil { @@ -104,7 +107,7 @@ func (d *checkUnmanagedDispatcherImpl) wrapOperationError(err error, clusterName return wrapOperationError( err, operation, - d.targetGVK.Kind, + d.targetGVR.String(), d.targetNameForCluster(clusterName).String(), clusterName, ) diff --git a/pkg/controllers/sync/dispatch/managed.go b/pkg/controllers/sync/dispatch/managed.go index a8dca909..c9b2a7fd 100644 --- a/pkg/controllers/sync/dispatch/managed.go +++ b/pkg/controllers/sync/dispatch/managed.go @@ -26,27 +26,24 @@ import ( "fmt" "strings" "sync" - "time" "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/dynamic" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/status" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/managedlabel" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/adoption" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" ) const IndexRolloutPlans = "federation_placement_rollout" @@ -55,24 +52,16 @@ const IndexRolloutPlans = "federation_placement_rollout" // interface required for dispatching operations to managed resources. type FederatedResourceForDispatch interface { TargetName() common.QualifiedName - TargetKind() string TargetGVK() schema.GroupVersionKind + TargetGVR() schema.GroupVersionResource TypeConfig() *fedcorev1a1.FederatedTypeConfig - // Replicas is the number of replicas specified in the federated object. - Replicas() (*int64, error) // Object returns the federated object. 
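+	// The concrete type is either a namespaced FederatedObject or a
+	// cluster-scoped ClusterFederatedObject.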
- Object() *unstructured.Unstructured + Object() fedcorev1a1.GenericFederatedObject VersionForCluster(clusterName string) (string, error) ObjectForCluster(clusterName string) (*unstructured.Unstructured, error) - ApplyOverrides( - obj *unstructured.Unstructured, - clusterName string, - otherOverrides fedtypesv1a1.OverridePatches, - ) error + ApplyOverrides(obj *unstructured.Unstructured, clusterName string) error RecordError(errorCode string, err error) RecordEvent(reason, messageFmt string, args ...interface{}) - ReplicasOverrideForCluster(clusterName string) (int32, bool, error) - TotalReplicas(clusterNames sets.String) (int32, error) } // ManagedDispatcher dispatches operations to member clusters for resources @@ -84,8 +73,8 @@ type ManagedDispatcher interface { Update(ctx context.Context, clusterName string, clusterObj *unstructured.Unstructured) VersionMap() map[string]string CollectedStatus() status.CollectedPropagationStatus - RecordClusterError(propStatus fedtypesv1a1.PropagationStatus, clusterName string, err error) - RecordStatus(clusterName string, propStatus fedtypesv1a1.PropagationStatus) + RecordClusterError(propStatus fedcorev1a1.PropagationStatusType, clusterName string, err error) + RecordStatus(clusterName string, propStatus fedcorev1a1.PropagationStatusType) } type managedDispatcherImpl struct { @@ -101,7 +90,6 @@ type managedDispatcherImpl struct { // Track when resource updates are performed to allow indicating // when a change was last propagated to member clusters. resourcesUpdated bool - rolloutPlans util.RolloutPlans metrics stats.Metrics } @@ -120,7 +108,7 @@ func NewManagedDispatcher( metrics: metrics, } d.dispatcher = newOperationDispatcher(clientAccessor, d) - d.unmanagedDispatcher = newUnmanagedDispatcher(d.dispatcher, d, fedResource.TargetGVK(), fedResource.TargetName()) + d.unmanagedDispatcher = newUnmanagedDispatcher(d.dispatcher, d, fedResource.TargetGVR(), fedResource.TargetName()) return d } @@ -136,18 +124,18 @@ func (d *managedDispatcherImpl) Wait() (bool, error) { defer d.RUnlock() // Transition timed out status for this set to ok. okTimedOut := sets.NewString( - string(fedtypesv1a1.CreationTimedOut), - string(fedtypesv1a1.UpdateTimedOut), + string(fedcorev1a1.CreationTimedOut), + string(fedcorev1a1.UpdateTimedOut), ) for key, value := range d.statusMap { propStatus := string(value) if okTimedOut.Has(propStatus) { - d.statusMap[key] = fedtypesv1a1.ClusterPropagationOK - } else if propStatus == string(fedtypesv1a1.DeletionTimedOut) { + d.statusMap[key] = fedcorev1a1.ClusterPropagationOK + } else if propStatus == string(fedcorev1a1.DeletionTimedOut) { // If deletion was successful, then assume the resource is // pending garbage collection. - d.statusMap[key] = fedtypesv1a1.WaitingForRemoval - } else if propStatus == string(fedtypesv1a1.LabelRemovalTimedOut) { + d.statusMap[key] = fedcorev1a1.WaitingForRemoval + } else if propStatus == string(fedcorev1a1.LabelRemovalTimedOut) { // If label removal was successful, the resource is // effectively unmanaged for the cluster even though it // still may be cached. 
@@ -157,193 +145,27 @@ func (d *managedDispatcherImpl) Wait() (bool, error) { return ok, nil } -// Deprecated: this method is not used and outdated, but should be kept to reintegrate rollout planner in the future -func (d *managedDispatcherImpl) Dispatch(ctx context.Context, targetGetter targetAccessorFunc, clusters []*fedcorev1a1.FederatedCluster, - selectedClusterNames sets.String, rolloutPlanEnabled bool, -) { - clusterObjs := make(map[string]*unstructured.Unstructured) - toDelete := sets.String{} - for _, cluster := range clusters { - clusterName := cluster.Name - selectedCluster := selectedClusterNames.Has(clusterName) - - if !util.IsClusterReady(&cluster.Status) { - if selectedCluster { - // Cluster state only needs to be reported in resource - // status for clusters selected for placement. - err := errors.New("Cluster not ready") - d.RecordClusterError(fedtypesv1a1.ClusterNotReady, clusterName, err) - } - continue - } - - clusterObj, err := targetGetter(clusterName) - if err != nil { - wrappedErr := errors.Wrap(err, "Failed to retrieve cached cluster object") - d.RecordClusterError(fedtypesv1a1.CachedRetrievalFailed, clusterName, wrappedErr) - continue - } - - // Resource should not exist in the named cluster - if !selectedCluster { - if clusterObj == nil { - // Resource does not exist in the cluster - continue - } - if clusterObj.GetDeletionTimestamp() != nil { - // Resource is marked for deletion - d.RecordStatus(clusterName, fedtypesv1a1.WaitingForRemoval) - continue - } - toDelete.Insert(clusterName) - } - - clusterObjs[clusterName] = clusterObj - } - - // skip rollout plan for hpa and daemonset - if rolloutPlanEnabled { - retain, err := checkRetainReplicas(d.fedResource.Object()) - if err != nil { - d.fedResource.RecordError("CheckRetainReplicasFailed", err) - return - } - if retain { - rolloutPlanEnabled = false - } - } - if rolloutPlanEnabled { - clusterPlans, err := d.planRolloutProcess(ctx, clusterObjs, selectedClusterNames, toDelete) - if err != nil { - d.fedResource.RecordError(string(fedtypesv1a1.PlanRolloutFailed), err) - } - for clusterName, clusterObj := range clusterObjs { - var planned bool - var plan util.RolloutPlan - if clusterPlans != nil { - if p, ok := clusterPlans[clusterName]; ok && p != nil { - planned = true - plan = *p - } - } - - if !planned { - if clusterObj != nil { - // dispatch without updating template - d.PatchAndKeepTemplate(ctx, clusterName, clusterObj, true) - } - continue - } - if toDelete.Has(clusterName) && (plan.Replicas == nil || plan.Replicas != nil && *plan.Replicas == 0) { - d.Delete(ctx, clusterName, clusterObj) - continue - } - if clusterObj == nil { - d.Create(ctx, clusterName) - continue - } - if plan.OnlyPatchReplicas && plan.Replicas != nil { - d.PatchAndKeepTemplate(ctx, clusterName, clusterObj, false) - continue - } - d.Update(ctx, clusterName, clusterObj) - } - return - } - - d.emitRolloutStatus(ctx, clusterObjs, selectedClusterNames, nil) - for clusterName, clusterObj := range clusterObjs { - if toDelete.Has(clusterName) { - d.Delete(ctx, clusterName, clusterObj) - continue - } - - // TODO Consider waiting until the result of resource - // creation has reached the target store before attempting - // subsequent operations. Otherwise the object won't be found - // but an add operation will fail with AlreadyExists. 
- if clusterObj == nil { - d.Create(ctx, clusterName) - } else { - d.Update(ctx, clusterName, clusterObj) - } - } -} - -func (d *managedDispatcherImpl) planRolloutProcess(ctx context.Context, clusterObjs map[string]*unstructured.Unstructured, - selectedClusterNames, toDelete sets.String, -) (util.RolloutPlans, error) { - var ( - r = d.fedResource - key = r.TargetName().String() - gvk = r.TargetGVK() - planner *util.RolloutPlanner - plans util.RolloutPlans - replicas int32 - err error - logger = klog.FromContext(ctx) - ) - - defer func() { - if err != nil { - logger.Error(err, "Failed to generate rollout plans") - } else { - logger.WithValues("plans", plans, "current-status", planner).V(4).Info("Generating rollout plans") - } - SendRolloutPlansToES(planner, plans, r.Object(), err) - d.emitRolloutStatus(ctx, clusterObjs, selectedClusterNames, planner) - }() - - if gvk != appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) { - err = errors.Errorf("Unsupported target type for rollout plan: %s", gvk) - return nil, err - } - if replicas, err = r.TotalReplicas(selectedClusterNames); err != nil { - return nil, err - } - if planner, err = util.NewRolloutPlanner(key, r.TypeConfig(), r.Object(), replicas); err != nil { - return nil, err - } - for clusterName, clusterObj := range clusterObjs { - var desiredReplicas int32 - if !toDelete.Has(clusterName) { - var dr int32 - if dr, _, err = r.ReplicasOverrideForCluster(clusterName); err != nil { - return nil, err - } - desiredReplicas = dr - } - if err = planner.RegisterTarget(clusterName, clusterObj, desiredReplicas); err != nil { - err = errors.Wrap(err, "Failed to register target in "+clusterName) - return nil, err - } - } - plans = planner.Plan() - d.rolloutPlans = plans - return plans, nil -} - func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) { // Default the status to an operation-specific timeout. Otherwise // when a timeout occurs it won't be possible to determine which // operation timed out. The timeout status will be cleared by // Wait() if a timeout does not occur. 
- d.RecordStatus(clusterName, fedtypesv1a1.CreationTimedOut) + d.RecordStatus(clusterName, fedcorev1a1.CreationTimedOut) d.dispatcher.incrementOperationsInitiated() const op = "create" - go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client generic.Client) bool { + go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client dynamic.Interface) bool { keyedLogger := klog.FromContext(ctx).WithValues("cluster-name", clusterName) d.recordEvent(clusterName, op, "Creating") obj, err := d.fedResource.ObjectForCluster(clusterName) if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.ComputeResourceFailed, clusterName, op, err) + return d.recordOperationError(ctx, fedcorev1a1.ComputeResourceFailed, clusterName, op, err) } - err = d.fedResource.ApplyOverrides(obj, clusterName, d.rolloutOverrides(clusterName)) + err = d.fedResource.ApplyOverrides(obj, clusterName) if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.ApplyOverridesFailed, clusterName, op, err) + return d.recordOperationError(ctx, fedcorev1a1.ApplyOverridesFailed, clusterName, op, err) } recordPropagatedLabelsAndAnnotations(obj) @@ -352,7 +174,9 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) defer cancel() keyedLogger.V(1).Info("Creating target object in cluster") - err = client.Create(ctx, obj) + obj, err = client.Resource(d.fedResource.TargetGVR()).Namespace(obj.GetNamespace()).Create( + ctx, obj, metav1.CreateOptions{}, + ) if err == nil { version := util.ObjectVersion(obj) d.recordVersion(clusterName, version) @@ -364,21 +188,23 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) alreadyExists := apierrors.IsAlreadyExists(err) || d.fedResource.TargetGVK() == corev1.SchemeGroupVersion.WithKind(common.NamespaceKind) && apierrors.IsServerTimeout(err) if !alreadyExists { - return d.recordOperationError(ctx, fedtypesv1a1.CreationFailed, clusterName, op, err) + return d.recordOperationError(ctx, fedcorev1a1.CreationFailed, clusterName, op, err) } // Attempt to update the existing resource to ensure that it // is labeled as a managed resource. - err = client.Get(ctx, obj, obj.GetNamespace(), obj.GetName()) + obj, err = client.Resource(d.fedResource.TargetGVR()).Namespace(obj.GetNamespace()).Get( + ctx, obj.GetName(), metav1.GetOptions{}, + ) if err != nil { wrappedErr := errors.Wrapf(err, "failed to retrieve object potentially requiring adoption") - return d.recordOperationError(ctx, fedtypesv1a1.RetrievalFailed, clusterName, op, wrappedErr) + return d.recordOperationError(ctx, fedcorev1a1.RetrievalFailed, clusterName, op, wrappedErr) } if d.skipAdoptingResources { return d.recordOperationError( ctx, - fedtypesv1a1.AlreadyExists, + fedcorev1a1.AlreadyExists, clusterName, op, errors.Errorf("Resource pre-exist in cluster"), @@ -393,7 +219,7 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) ) if !managedlabel.HasManagedLabel(obj) { // If the object was not managed by us, mark it as adopted. 
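+			// Recording adoption with an annotation lets the pre-existing
+			// resource be brought under management instead of being treated
+			// as a creation conflict.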
- annotation.AddAnnotation(obj, util.AdoptedAnnotation, common.AnnotationValueTrue) + adoption.AddAdoptedAnnotation(obj) } d.Update(ctx, clusterName, obj) return true @@ -401,11 +227,11 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) } func (d *managedDispatcherImpl) Update(ctx context.Context, clusterName string, clusterObj *unstructured.Unstructured) { - d.RecordStatus(clusterName, fedtypesv1a1.UpdateTimedOut) + d.RecordStatus(clusterName, fedcorev1a1.UpdateTimedOut) d.dispatcher.incrementOperationsInitiated() const op = "update" - go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client generic.Client) bool { + go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client dynamic.Interface) bool { keyedLogger := klog.FromContext(ctx).WithValues("cluster-name", clusterName) if managedlabel.IsExplicitlyUnmanaged(clusterObj) { err := errors.Errorf( @@ -413,44 +239,36 @@ func (d *managedDispatcherImpl) Update(ctx context.Context, clusterName string, managedlabel.ManagedByKubeAdmiralLabelKey, managedlabel.UnmanagedByKubeAdmiralLabelValue, ) - return d.recordOperationError(ctx, fedtypesv1a1.ManagedLabelFalse, clusterName, op, err) + return d.recordOperationError(ctx, fedcorev1a1.ManagedLabelFalse, clusterName, op, err) } obj, err := d.fedResource.ObjectForCluster(clusterName) if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.ComputeResourceFailed, clusterName, op, err) + return d.recordOperationError(ctx, fedcorev1a1.ComputeResourceFailed, clusterName, op, err) } - err = d.fedResource.ApplyOverrides(obj, clusterName, d.rolloutOverrides(clusterName)) + err = d.fedResource.ApplyOverrides(obj, clusterName) if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.ApplyOverridesFailed, clusterName, op, err) + return d.recordOperationError(ctx, fedcorev1a1.ApplyOverridesFailed, clusterName, op, err) } recordPropagatedLabelsAndAnnotations(obj) - err = RetainOrMergeClusterFields(d.fedResource.TargetGVK(), obj, clusterObj, d.fedResource.Object()) + err = RetainOrMergeClusterFields(d.fedResource.TargetGVK(), obj, clusterObj) if err != nil { wrappedErr := errors.Wrapf(err, "failed to retain fields") - return d.recordOperationError(ctx, fedtypesv1a1.FieldRetentionFailed, clusterName, op, wrappedErr) + return d.recordOperationError(ctx, fedcorev1a1.FieldRetentionFailed, clusterName, op, wrappedErr) } err = retainReplicas(obj, clusterObj, d.fedResource.Object(), d.fedResource.TypeConfig()) if err != nil { wrappedErr := errors.Wrapf(err, "failed to retain replicas") - return d.recordOperationError(ctx, fedtypesv1a1.FieldRetentionFailed, clusterName, op, wrappedErr) - } - - if d.fedResource.TargetGVK() == appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) { - err = setLastReplicasetName(obj, clusterObj) - if err != nil { - wrappedErr := errors.Wrapf(err, "failed to set last replicaset name") - return d.recordOperationError(ctx, fedtypesv1a1.SetLastReplicasetNameFailed, clusterName, op, wrappedErr) - } + return d.recordOperationError(ctx, fedcorev1a1.FieldRetentionFailed, clusterName, op, wrappedErr) } version, err := d.fedResource.VersionForCluster(clusterName) if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.VersionRetrievalFailed, clusterName, op, err) + return d.recordOperationError(ctx, fedcorev1a1.VersionRetrievalFailed, clusterName, op, err) } if !util.ObjectNeedsUpdate(obj, clusterObj, version, d.fedResource.TypeConfig()) { @@ -464,9 +282,11 @@ func (d *managedDispatcherImpl) Update(ctx context.Context, 
clusterName string, d.recordEvent(clusterName, op, "Updating") keyedLogger.V(1).Info("Updating target object in cluster") - err = client.Update(ctx, obj) + obj, err = client.Resource(d.fedResource.TargetGVR()).Namespace(obj.GetNamespace()).Update( + ctx, obj, metav1.UpdateOptions{}, + ) if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.UpdateFailed, clusterName, op, err) + return d.recordOperationError(ctx, fedcorev1a1.UpdateFailed, clusterName, op, err) } d.setResourcesUpdated() version = util.ObjectVersion(obj) @@ -476,102 +296,19 @@ func (d *managedDispatcherImpl) Update(ctx context.Context, clusterName string, } func (d *managedDispatcherImpl) Delete(ctx context.Context, clusterName string, clusterObj *unstructured.Unstructured) { - d.RecordStatus(clusterName, fedtypesv1a1.DeletionTimedOut) + d.RecordStatus(clusterName, fedcorev1a1.DeletionTimedOut) d.unmanagedDispatcher.Delete(ctx, clusterName, clusterObj) } -func (d *managedDispatcherImpl) PatchAndKeepTemplate( - ctx context.Context, - clusterName string, - clusterObj *unstructured.Unstructured, - keepRolloutSettings bool, -) { - d.RecordStatus(clusterName, fedtypesv1a1.UpdateTimedOut) - - d.dispatcher.incrementOperationsInitiated() - const op = "update" - go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client generic.Client) bool { - keyedLogger := klog.FromContext(ctx).WithValues("cluster-name", clusterName) - if managedlabel.IsExplicitlyUnmanaged(clusterObj) { - err := errors.Errorf( - "Unable to manage the object which has label %s: %s", - managedlabel.ManagedByKubeAdmiralLabelKey, - managedlabel.UnmanagedByKubeAdmiralLabelValue, - ) - return d.recordOperationError(ctx, fedtypesv1a1.ManagedLabelFalse, clusterName, op, err) - } - - obj, err := d.fedResource.ObjectForCluster(clusterName) - if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.ComputeResourceFailed, clusterName, op, err) - } - - err = d.fedResource.ApplyOverrides(obj, clusterName, d.rolloutOverrides(clusterName)) - if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.ApplyOverridesFailed, clusterName, op, err) - } - - recordPropagatedLabelsAndAnnotations(obj) - - err = RetainOrMergeClusterFields(d.fedResource.TargetGVK(), obj, clusterObj, d.fedResource.Object()) - if err != nil { - wrappedErr := errors.Wrapf(err, "failed to retain fields") - return d.recordOperationError(ctx, fedtypesv1a1.FieldRetentionFailed, clusterName, op, wrappedErr) - } - - err = retainReplicas(obj, clusterObj, d.fedResource.Object(), d.fedResource.TypeConfig()) - if err != nil { - wrappedErr := errors.Wrapf(err, "failed to retain replicas") - return d.recordOperationError(ctx, fedtypesv1a1.FieldRetentionFailed, clusterName, op, wrappedErr) - } - - if d.fedResource.TargetGVK() == appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) { - if err = retainTemplate(obj, clusterObj, d.fedResource.TypeConfig(), keepRolloutSettings); err != nil { - wrappedErr := errors.Wrapf(err, "failed to retain template") - return d.recordOperationError(ctx, fedtypesv1a1.FieldRetentionFailed, clusterName, op, wrappedErr) - } - if err = setLastReplicasetName(obj, clusterObj); err != nil { - wrappedErr := errors.Wrapf(err, "failed to set last replicaset name") - return d.recordOperationError(ctx, fedtypesv1a1.SetLastReplicasetNameFailed, clusterName, op, wrappedErr) - } - } - - version, err := d.fedResource.VersionForCluster(clusterName) - if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.VersionRetrievalFailed, clusterName, op, err) - } - - if 
!util.ObjectNeedsUpdate(obj, clusterObj, version, d.fedResource.TypeConfig()) { - // Resource is current, we still record version in dispatcher - // so that federated status can be set with cluster resource generation - d.recordVersion(clusterName, version) - return true - } - - // Only record an event if the resource is not current - d.recordEvent(clusterName, op, "Updating") - - keyedLogger.V(1).Info("Patching and keeping template for target object in cluster") - err = client.Update(ctx, obj) - if err != nil { - return d.recordOperationError(ctx, fedtypesv1a1.UpdateFailed, clusterName, op, err) - } - d.setResourcesUpdated() - version = util.ObjectVersion(obj) - d.recordVersion(clusterName, version) - return true - }) -} - func (d *managedDispatcherImpl) RemoveManagedLabel(ctx context.Context, clusterName string, clusterObj *unstructured.Unstructured) { - d.RecordStatus(clusterName, fedtypesv1a1.LabelRemovalTimedOut) + d.RecordStatus(clusterName, fedcorev1a1.LabelRemovalTimedOut) d.unmanagedDispatcher.RemoveManagedLabel(ctx, clusterName, clusterObj) } func (d *managedDispatcherImpl) RecordClusterError( - propStatus fedtypesv1a1.PropagationStatus, + propStatus fedcorev1a1.PropagationStatusType, clusterName string, err error, ) { @@ -579,7 +316,7 @@ func (d *managedDispatcherImpl) RecordClusterError( d.RecordStatus(clusterName, propStatus) } -func (d *managedDispatcherImpl) RecordStatus(clusterName string, propStatus fedtypesv1a1.PropagationStatus) { +func (d *managedDispatcherImpl) RecordStatus(clusterName string, propStatus fedcorev1a1.PropagationStatusType) { d.Lock() defer d.Unlock() d.statusMap[clusterName] = propStatus @@ -587,7 +324,7 @@ func (d *managedDispatcherImpl) RecordStatus(clusterName string, propStatus fedt func (d *managedDispatcherImpl) recordOperationError( ctx context.Context, - propStatus fedtypesv1a1.PropagationStatus, + propStatus fedcorev1a1.PropagationStatusType, clusterName, operation string, err error, ) bool { @@ -598,7 +335,7 @@ func (d *managedDispatcherImpl) recordOperationError( func (d *managedDispatcherImpl) recordError(ctx context.Context, clusterName, operation string, err error) { targetName := d.unmanagedDispatcher.targetNameForCluster(clusterName) - args := []interface{}{operation, d.fedResource.TargetKind(), targetName, clusterName} + args := []interface{}{operation, d.fedResource.TargetGVR().String(), targetName, clusterName} eventType := fmt.Sprintf("%sInClusterFailed", strings.Replace(strings.Title(operation), " ", "", -1)) eventErr := errors.Wrapf(err, "Failed to "+eventTemplate, args...) logger := klog.FromContext(ctx) @@ -613,7 +350,7 @@ func (d *managedDispatcherImpl) recordError(ctx context.Context, clusterName, op func (d *managedDispatcherImpl) recordEvent(clusterName, operation, operationContinuous string) { targetName := d.unmanagedDispatcher.targetNameForCluster(clusterName) - args := []interface{}{operationContinuous, d.fedResource.TargetKind(), targetName, clusterName} + args := []interface{}{operationContinuous, d.fedResource.TargetGVR().String(), targetName, clusterName} eventType := fmt.Sprintf("%sInCluster", strings.Replace(strings.Title(operation), " ", "", -1)) d.fedResource.RecordEvent(eventType, eventTemplate, args...) 
} @@ -653,149 +390,3 @@ func (d *managedDispatcherImpl) CollectedStatus() status.CollectedPropagationSta ResourcesUpdated: d.resourcesUpdated, } } - -func (d *managedDispatcherImpl) rolloutOverrides(clusterName string) fedtypesv1a1.OverridePatches { - if d.rolloutPlans == nil { - return fedtypesv1a1.OverridePatches{} - } - return d.rolloutPlans.GetRolloutOverrides(clusterName) -} - -// emitRolloutStatus temporarily emit status metrics during rollout for observation -func (d *managedDispatcherImpl) emitRolloutStatus( - ctx context.Context, - clusterObjs map[string]*unstructured.Unstructured, - selectedClusterNames sets.String, - planner *util.RolloutPlanner, -) { - r := d.fedResource - deployName := r.TargetName().Name - if r.TargetGVK() != appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) { - return - } - - fedClusterName := "fed" - fedTags := []stats.Tag{ - {Name: "dp", Value: deployName}, - {Name: "cluster", Value: fedClusterName}, - {Name: "ismember", Value: "false"}, - } - logger := klog.FromContext(ctx) - - // settings - if planner == nil { - replicas, err := r.TotalReplicas(selectedClusterNames) - if err != nil { - logger.Error(err, "Skip rollout metrics: failed to get replicas") - return - } - pathPrefix := []string{common.SpecField, common.TemplateField} - maxSurgePath := append(pathPrefix, util.MaxSurgePathSlice...) - maxUnavailablePath := append(pathPrefix, util.MaxUnavailablePathSlice...) - maxSurge, maxUnavailable, err := util.RetrieveFencepost(r.Object(), maxSurgePath, maxUnavailablePath, replicas) - if err != nil { - logger.Error(err, "Skip rollout metrics: failed to get maxSurge and maxUnavailable") - return - } - _ = d.metrics.Store("sync.rollout.maxSurge", maxSurge, fedTags...) - _ = d.metrics.Store("sync.rollout.maxUnavailable", maxUnavailable, fedTags...) - _ = d.metrics.Store("sync.rollout.minAvailable", replicas-maxUnavailable, fedTags...) - } else { - _ = d.metrics.Store("sync.rollout.maxSurge", planner.MaxSurge, fedTags...) - _ = d.metrics.Store("sync.rollout.maxUnavailable", planner.MaxUnavailable, fedTags...) - _ = d.metrics.Store("sync.rollout.minAvailable", planner.Replicas-planner.MaxUnavailable, fedTags...) - - for _, t := range planner.Targets { - clusterName := t.ClusterName - tags := []stats.Tag{{Name: "dp", Value: deployName}, {Name: "cluster", Value: clusterName}, {Name: "ismember", Value: "true"}} - _ = d.metrics.Store("sync.rollout.maxSurge", t.Status.MaxSurge, tags...) - _ = d.metrics.Store("sync.rollout.maxUnavailable", t.Status.MaxUnavailable, tags...) - } - } - - // status - var unavailable, surge, available int64 - for clusterName, clusterObj := range clusterObjs { - if clusterObj == nil { - continue - } - tags := []stats.Tag{ - {Name: "dp", Value: deployName}, - {Name: "cluster", Value: clusterName}, - {Name: "ismember", Value: "true"}, - } - if u, ok, err := unstructured.NestedInt64(clusterObj.Object, "status", "unavailableReplicas"); err == nil && - ok { - _ = d.metrics.Store("sync.rollout.unavailable", u, tags...) - unavailable += u - } - if r, ok, err := unstructured.NestedInt64(clusterObj.Object, "status", "replicas"); err == nil && ok { - if r0, err := utilunstructured.GetInt64FromPath( - clusterObj, - d.fedResource.TypeConfig().Spec.PathDefinition.ReplicasSpec, - nil, - ); err == nil && r0 != nil { - s := r - *r0 - _ = d.metrics.Store("sync.rollout.surge", s, tags...) 
- surge += s - } - } - if a, ok, err := unstructured.NestedInt64(clusterObj.Object, "status", "availableReplicas"); err == nil && ok { - _ = d.metrics.Store("sync.rollout.available", a, tags...) - available += a - } - } - _ = d.metrics.Store("sync.rollout.surge", surge, fedTags...) - _ = d.metrics.Store("sync.rollout.unavailable", unavailable, fedTags...) - _ = d.metrics.Store("sync.rollout.available", available, fedTags...) -} - -func SendRolloutPlansToES( - planner *util.RolloutPlanner, - plans util.RolloutPlans, - fedResource *unstructured.Unstructured, - err error, -) { - data := struct { - Date time.Time - Name string - Namespace string - Kind string - Replicas int32 - MaxSurge int32 - MaxUnavailable int32 - Revision string - CurrentStatus string - Result util.RolloutPlans - ResultStr string - Error string - }{ - Date: time.Now(), - Name: fedResource.GetName(), - Namespace: fedResource.GetNamespace(), - Kind: fedResource.GetKind(), - Result: plans, - } - if err != nil { - data.Error = err.Error() - } else { - if planner != nil { - var s []string - for _, t := range planner.Targets { - s = append(s, t.String()) - } - data.Replicas = planner.Replicas - data.MaxSurge = planner.MaxSurge - data.MaxUnavailable = planner.MaxUnavailable - data.Revision = planner.Revision - data.CurrentStatus = strings.Join(s, "\n") - } - if len(plans) > 0 { - var s []string - for c, p := range plans { - s = append(s, c+":"+p.String()) - } - data.ResultStr = strings.Join(s, "\n") - } - } -} diff --git a/pkg/controllers/sync/dispatch/operation.go b/pkg/controllers/sync/dispatch/operation.go index 5109fca7..42a1174a 100644 --- a/pkg/controllers/sync/dispatch/operation.go +++ b/pkg/controllers/sync/dispatch/operation.go @@ -28,20 +28,20 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" "k8s.io/klog/v2" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) type ( - clientAccessorFunc func(clusterName string) (generic.Client, error) + clientAccessorFunc func(clusterName string) (dynamic.Interface, error) targetAccessorFunc func(clusterName string) (*unstructured.Unstructured, error) ) type dispatchRecorder interface { recordEvent(clusterName, operation, operationContinuous string) - recordOperationError(ctx context.Context, status fedtypesv1a1.PropagationStatus, clusterName, operation string, err error) bool + recordOperationError(ctx context.Context, status fedcorev1a1.PropagationStatusType, clusterName, operation string, err error) bool } // OperationDispatcher provides an interface to wait for operations @@ -100,7 +100,7 @@ func (d *operationDispatcherImpl) Wait() (bool, error) { return ok, nil } -func (d *operationDispatcherImpl) clusterOperation(ctx context.Context, clusterName, op string, opFunc func(generic.Client) bool) { +func (d *operationDispatcherImpl) clusterOperation(ctx context.Context, clusterName, op string, opFunc func(dynamic.Interface) bool) { // TODO Support cancellation of client calls on timeout. 
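+	// The accessor returns a dynamic client for the member cluster; operations
+	// address resources in it by GVR.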
client, err := d.clientAccessor(clusterName) logger := klog.FromContext(ctx) @@ -109,7 +109,7 @@ func (d *operationDispatcherImpl) clusterOperation(ctx context.Context, clusterN if d.recorder == nil { logger.Error(wrappedErr, "Failed to retrieve client for cluster") } else { - d.recorder.recordOperationError(ctx, fedtypesv1a1.ClientRetrievalFailed, clusterName, op, wrappedErr) + d.recorder.recordOperationError(ctx, fedcorev1a1.ClientRetrievalFailed, clusterName, op, wrappedErr) } d.resultChan <- false return diff --git a/pkg/controllers/sync/dispatch/retain.go b/pkg/controllers/sync/dispatch/retain.go index 4a39456e..83b95dee 100644 --- a/pkg/controllers/sync/dispatch/retain.go +++ b/pkg/controllers/sync/dispatch/retain.go @@ -33,8 +33,6 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" ) @@ -50,7 +48,7 @@ const ( // from the cluster object. func RetainOrMergeClusterFields( targetGvk schema.GroupVersionKind, - desiredObj, clusterObj, fedObj *unstructured.Unstructured, + desiredObj, clusterObj *unstructured.Unstructured, ) error { // Pass the same ResourceVersion as in the cluster object for update operation, otherwise operation will fail. desiredObj.SetResourceVersion(clusterObj.GetResourceVersion()) @@ -551,71 +549,6 @@ func retainReplicas(desiredObj, clusterObj *unstructured.Unstructured, fedObj me return nil } -func setLastReplicasetName(desiredObj, clusterObj *unstructured.Unstructured) error { - if clusterObj == nil { - return nil - } - revision, ok := desiredObj.GetAnnotations()[common.CurrentRevisionAnnotation] - if !ok { - return nil - } - lastDispatchedRevision, ok := clusterObj.GetAnnotations()[common.CurrentRevisionAnnotation] - if ok && revision != lastDispatchedRevision { - // update LastReplicasetName only when the revision must have been changed - rsName, ok := clusterObj.GetAnnotations()[util.LatestReplicasetNameAnnotation] - if !ok { - // don't block the dispatch if the annotation is missing, validate the existence during plan initiation - return nil - } - if _, err := annotationutil.AddAnnotation(desiredObj, common.LastReplicasetName, rsName); err != nil { - return err - } - } - return nil -} - -func retainTemplate( - desiredObj, clusterObj *unstructured.Unstructured, - typeConfig *fedcorev1a1.FederatedTypeConfig, - keepRolloutSettings bool, -) error { - tpl, ok, err := unstructured.NestedMap(clusterObj.Object, common.SpecField, common.TemplateField) - if err != nil { - return err - } - if ok { - if err := unstructured.SetNestedMap(desiredObj.Object, tpl, common.SpecField, common.TemplateField); err != nil { - return err - } - } else { - unstructured.RemoveNestedField(desiredObj.Object, common.SpecField, common.TemplateField) - } - - revision, ok := clusterObj.GetAnnotations()[common.CurrentRevisionAnnotation] - if ok { - if _, err := annotationutil.AddAnnotation(desiredObj, common.CurrentRevisionAnnotation, revision); err != nil { - return err - } - } else { - if _, err := annotationutil.RemoveAnnotation(desiredObj, common.CurrentRevisionAnnotation); err != nil { - return err - } - } - - if keepRolloutSettings { - replicas, err := utilunstructured.GetInt64FromPath(clusterObj, 
typeConfig.Spec.PathDefinition.ReplicasSpec, nil) - if err != nil { - return err - } - - if err := utilunstructured.SetInt64FromPath(desiredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, replicas, nil); err != nil { - return err - } - } - - return nil -} - func retainArgoWorkflow(desiredObj, clusterObj *unstructured.Unstructured) error { // Usually status is a subresource and will not be modified with an update request, i.e. it is implicitly retained. // If the status field is not a subresource, we need to explicitly retain it. diff --git a/pkg/controllers/sync/dispatch/unmanaged.go b/pkg/controllers/sync/dispatch/unmanaged.go index 75a4700b..683dd493 100644 --- a/pkg/controllers/sync/dispatch/unmanaged.go +++ b/pkg/controllers/sync/dispatch/unmanaged.go @@ -30,15 +30,14 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/dynamic" "k8s.io/klog/v2" - runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/managedlabel" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" ) const ( @@ -58,7 +57,7 @@ type UnmanagedDispatcher interface { type unmanagedDispatcherImpl struct { dispatcher *operationDispatcherImpl - targetGVK schema.GroupVersionKind + targetGVR schema.GroupVersionResource targetName common.QualifiedName recorder dispatchRecorder @@ -66,22 +65,22 @@ type unmanagedDispatcherImpl struct { func NewUnmanagedDispatcher( clientAccessor clientAccessorFunc, - targetGVK schema.GroupVersionKind, + targetGVR schema.GroupVersionResource, targetName common.QualifiedName, ) UnmanagedDispatcher { dispatcher := newOperationDispatcher(clientAccessor, nil) - return newUnmanagedDispatcher(dispatcher, nil, targetGVK, targetName) + return newUnmanagedDispatcher(dispatcher, nil, targetGVR, targetName) } func newUnmanagedDispatcher( dispatcher *operationDispatcherImpl, recorder dispatchRecorder, - targetGVK schema.GroupVersionKind, + targetGVR schema.GroupVersionResource, targetName common.QualifiedName, ) *unmanagedDispatcherImpl { return &unmanagedDispatcherImpl{ dispatcher: dispatcher, - targetGVK: targetGVK, + targetGVR: targetGVR, targetName: targetName, recorder: recorder, } @@ -95,7 +94,7 @@ func (d *unmanagedDispatcherImpl) Delete(ctx context.Context, clusterName string d.dispatcher.incrementOperationsInitiated() const op = "delete" const opContinuous = "Deleting" - go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client generic.Client) bool { + go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client dynamic.Interface) bool { keyedLogger := klog.FromContext(ctx).WithValues("cluster-name", clusterName) targetName := d.targetNameForCluster(clusterName) keyedLogger.V(1).Info("Deleting target object in cluster") @@ -112,14 +111,15 @@ func (d *unmanagedDispatcherImpl) Delete(ctx context.Context, clusterName string wrappedErr := d.wrapOperationError(err, clusterName, op) keyedLogger.Error(wrappedErr, "Failed to delete target object in cluster") } else { - d.recorder.recordOperationError(ctx, fedtypesv1a1.DeletionFailed, 
clusterName, op, err) + d.recorder.recordOperationError(ctx, fedcorev1a1.DeletionFailed, clusterName, op, err) } return false } if needUpdate { - err := client.Update( + clusterObj, err = client.Resource(d.targetGVR).Namespace(targetName.Namespace).Update( ctx, clusterObj, + metav1.UpdateOptions{}, ) if apierrors.IsNotFound(err) { err = nil @@ -129,7 +129,7 @@ func (d *unmanagedDispatcherImpl) Delete(ctx context.Context, clusterName string wrappedErr := d.wrapOperationError(err, clusterName, op) keyedLogger.Error(wrappedErr, "Failed to delete target object in cluster") } else { - d.recorder.recordOperationError(ctx, fedtypesv1a1.DeletionFailed, clusterName, op, err) + d.recorder.recordOperationError(ctx, fedcorev1a1.DeletionFailed, clusterName, op, err) } return false } @@ -140,21 +140,18 @@ func (d *unmanagedDispatcherImpl) Delete(ctx context.Context, clusterName string return true } - obj := &unstructured.Unstructured{} - obj.SetGroupVersionKind(d.targetGVK) - err = client.Delete( + // When deleting some resources (e.g. batch/v1.Job, batch/v1beta1.CronJob) without setting PropagationPolicy in DeleteOptions, + // kube-apiserver defaults to Orphan for backward compatibility. + // This would leak the dependents after the main propagated resource has been deleted. + // Ref: https://github.com/kubernetes/kubernetes/pull/71792 + // + // To avoid this, we explicitly set the PropagationPolicy to Background like `kubectl delete` does by default. + // Ref: https://github.com/kubernetes/kubernetes/pull/65908 + deletionPropagation := metav1.DeletePropagationBackground + err = client.Resource(d.targetGVR).Namespace(targetName.Namespace).Delete( ctx, - obj, - targetName.Namespace, targetName.Name, - // When deleting some resources (e.g. batch/v1.Job, batch/v1beta1.CronJob) without setting PropagationPolicy in DeleteOptions, - // kube-apiserver defaults to Orphan for backward compatibility. - // This would leak the dependents after the main propagated resource has been deleted. - // Ref: https://github.com/kubernetes/kubernetes/pull/71792 - // - // To avoid this, we explicitly set the PropagationPolicy to Background like `kubectl delete` does by default. 
- // Ref: https://github.com/kubernetes/kubernetes/pull/65908 - runtimeclient.PropagationPolicy(metav1.DeletePropagationBackground), + metav1.DeleteOptions{PropagationPolicy: &deletionPropagation}, ) if apierrors.IsNotFound(err) { err = nil @@ -164,7 +161,7 @@ func (d *unmanagedDispatcherImpl) Delete(ctx context.Context, clusterName string wrappedErr := d.wrapOperationError(err, clusterName, op) keyedLogger.Error(wrappedErr, "Failed to delete target object in cluster") } else { - d.recorder.recordOperationError(ctx, fedtypesv1a1.DeletionFailed, clusterName, op, err) + d.recorder.recordOperationError(ctx, fedcorev1a1.DeletionFailed, clusterName, op, err) } return false } @@ -176,7 +173,7 @@ func (d *unmanagedDispatcherImpl) RemoveManagedLabel(ctx context.Context, cluste d.dispatcher.incrementOperationsInitiated() const op = "remove managed label from" const opContinuous = "Removing managed label from" - go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client generic.Client) bool { + go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client dynamic.Interface) bool { keyedLogger := klog.FromContext(ctx).WithValues("cluster-name", clusterName) keyedLogger.V(1).Info("Removing managed label from target object in cluster") if d.recorder != nil { @@ -192,18 +189,21 @@ func (d *unmanagedDispatcherImpl) RemoveManagedLabel(ctx context.Context, cluste wrappedErr := d.wrapOperationError(err, clusterName, op) keyedLogger.Error(wrappedErr, "Failed to remove managed label from target object in cluster") } else { - d.recorder.recordOperationError(ctx, fedtypesv1a1.LabelRemovalFailed, clusterName, op, err) + d.recorder.recordOperationError(ctx, fedcorev1a1.LabelRemovalFailed, clusterName, op, err) } return false } - err := client.Update(ctx, updateObj) + var err error + updateObj, err = client.Resource(d.targetGVR).Namespace(clusterObj.GetNamespace()).Update( + ctx, updateObj, metav1.UpdateOptions{}, + ) if err != nil { if d.recorder == nil { wrappedErr := d.wrapOperationError(err, clusterName, op) keyedLogger.Error(wrappedErr, "Failed to remove managed label from target object in cluster") } else { - d.recorder.recordOperationError(ctx, fedtypesv1a1.LabelRemovalFailed, clusterName, op, err) + d.recorder.recordOperationError(ctx, fedcorev1a1.LabelRemovalFailed, clusterName, op, err) } return false } @@ -215,7 +215,7 @@ func (d *unmanagedDispatcherImpl) wrapOperationError(err error, clusterName, ope return wrapOperationError( err, operation, - d.targetGVK.Kind, + d.targetGVR.String(), d.targetNameForCluster(clusterName).String(), clusterName, ) @@ -225,8 +225,8 @@ func (d *unmanagedDispatcherImpl) targetNameForCluster(clusterName string) commo return util.QualifiedNameForCluster(clusterName, d.targetName) } -func wrapOperationError(err error, operation, targetKind, targetName, clusterName string) error { - return errors.Wrapf(err, "Failed to "+eventTemplate, operation, targetKind, targetName, clusterName) +func wrapOperationError(err error, operation, targetGVR, targetName, clusterName string) error { + return errors.Wrapf(err, "Failed to "+eventTemplate, operation, targetGVR, targetName, clusterName) } func removeRetainObjectFinalizer(obj *unstructured.Unstructured) (bool, error) { diff --git a/pkg/controllers/sync/history.go b/pkg/controllers/sync/history.go deleted file mode 100644 index 167fc1ac..00000000 --- a/pkg/controllers/sync/history.go +++ /dev/null @@ -1,305 +0,0 @@ -//go:build exclude -/* -Copyright 2023 The KubeAdmiral Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sync - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/history" -) - -// syncRevisions update current history revision number, or create current history if need to. -// It also deduplicates current history, and keeps quantity of history within historyLimit. -// It returns collision count, latest revision name, current revision name and possible error -func (s *SyncController) syncRevisions(ctx context.Context, fedResource FederatedResource) (*int32, string, string, error) { - var ( - oldRevisions []*appsv1.ControllerRevision - currentRevisions []*appsv1.ControllerRevision - ) - - collisionCount := fedResource.CollisionCount() - - historyLimit := fedResource.RevisionHistoryLimit() - if historyLimit == 0 { - return collisionCount, "", "", nil - } - - // create a new revision from the current fedResource, figure out the right revision number later - updateRevision, err := newRevision(fedResource, 0, collisionCount) - if err != nil { - return collisionCount, "", "", err - } - - revisions, err := s.listRevisions(fedResource) - if err != nil { - return collisionCount, "", "", err - } - for _, revision := range revisions { - if history.EqualRevision(revision, updateRevision) { - currentRevisions = append(currentRevisions, revision) - } else { - oldRevisions = append(oldRevisions, revision) - } - } - - keyedLogger := klog.FromContext(ctx) - revisionNumber := maxRevision(oldRevisions) + 1 - switch len(currentRevisions) { - case 0: - // create a new revision if the current one isn't found - if _, err := s.controllerHistory.CreateControllerRevision(fedResource.Object(), updateRevision, collisionCount); err != nil { - return collisionCount, "", "", err - } - default: - update, err := s.dedupCurRevisions(currentRevisions) - if err != nil { - return collisionCount, "", "", err - } - // Update revision number if necessary - if update.Revision < revisionNumber { - if _, err := s.controllerHistory.UpdateControllerRevision(update, revisionNumber); err != nil { - return collisionCount, "", "", err - } - } else if update.Revision >= revisionNumber { - labels := revisionLabelsWithOriginalLabel(fedResource) - if err := s.updateRevisionLabel(ctx, update, labels); err != nil { - return collisionCount, "", "", err - } - } - } - - // maintain the revision history limit. 
- if err := s.truncateRevisions(ctx, oldRevisions, historyLimit); err != nil { - // it doesn't hurt much on failure - keyedLogger.Error(err, "Failed to truncate revisionHistory for federated object") - } - // revisions are sorted after truncation, so the last one in oldRevisions should be the latest revision - var lastRevisionNameWithHash string - if len(oldRevisions) >= 1 && historyLimit >= 1 { - lastRevisionNameWithHash = oldRevisions[len(oldRevisions)-1].Name - } - if lastRevisionNameWithHash != "" { - podTemplateHash, err := getPodTemplateHash(fedResource) - if err == nil { - // suffix lastRevisionNameWithHash with podTemplateHash for check before rollback - lastRevisionNameWithHash = lastRevisionNameWithHash + "|" + podTemplateHash - } - } - - for _, revision := range oldRevisions { - if err := s.updateRevisionLabel(ctx, revision, revisionLabelsWithOriginalLabel(fedResource)); err != nil { - return collisionCount, "", "", err - } - } - return collisionCount, lastRevisionNameWithHash, updateRevision.Name, nil -} - -// check if y is a subset of x -func IsLabelSubset(x, y map[string]string) bool { - if y == nil || len(y) == 0 { - return true - } - if x == nil || len(x) == 0 { - return false - } - for k, v := range y { - if x[k] != v { - return false - } - } - return true -} - -func (s *SyncController) updateRevisionLabel( - ctx context.Context, - revision *appsv1.ControllerRevision, - labels map[string]string, -) error { - keyedLogger := klog.FromContext(ctx) - if !IsLabelSubset(revision.GetLabels(), labels) { - clone := revision.DeepCopy() - for k, v := range labels { - clone.Labels[k] = v - } - revisionNumber := clone.Revision - // set revision to 0 to update forcely - clone.Revision = 0 - if _, err := s.controllerHistory.UpdateControllerRevision(clone, revisionNumber); err != nil { - if apierrors.IsNotFound(err) { - keyedLogger.WithValues("controller-revision-name", revision.Name). - Error(err, "Failed to update the revision") - } else { - return err - } - } - } - return nil -} - -func (s *SyncController) truncateRevisions(ctx context.Context, revisions []*appsv1.ControllerRevision, limit int64) error { - history.SortControllerRevisions(revisions) - toKill := len(revisions) - int(limit) - keyedLogger := klog.FromContext(ctx) - for _, revision := range revisions { - if toKill <= 0 { - break - } - if err := s.controllerHistory.DeleteControllerRevision(revision); err != nil { - if apierrors.IsNotFound(err) { - keyedLogger.WithValues("controller-revision-name", revision.Name). - Error(err, "Failed to delete the revision") - } else { - return err - } - } - toKill-- - } - return nil -} - -func (s *SyncController) listRevisions(fedResource FederatedResource) ([]*appsv1.ControllerRevision, error) { - selector := labels.SelectorFromSet(revisionLabels(fedResource)) - return s.controllerHistory.ListControllerRevisions(fedResource.Object(), selector) -} - -// newRevision creates a new ControllerRevision containing a patch that reapplies the target state of federatedResource. 
-func newRevision( - fedResource FederatedResource, - revision int64, - collisionCount *int32, -) (*appsv1.ControllerRevision, error) { - patch, err := getPatch(fedResource) - if err != nil { - return nil, err - } - cr, err := history.NewControllerRevision(fedResource.Object(), - fedResource.FederatedGVK(), - revisionLabelsWithOriginalLabel(fedResource), - runtime.RawExtension{Raw: patch}, - revision, - collisionCount, - ) - if err != nil { - return nil, err - } - return cr, nil -} - -// dedupCurRevisions deduplicates current revisions and returns the revision to keep -func (s *SyncController) dedupCurRevisions( - dupRevisions []*appsv1.ControllerRevision, -) (*appsv1.ControllerRevision, error) { - if len(dupRevisions) == 0 { - return nil, fmt.Errorf("empty input for duplicate revisions") - } - - keepCur := dupRevisions[0] - maxRevision := dupRevisions[0].Revision - for _, cur := range dupRevisions { - if cur.Revision > maxRevision { - keepCur = cur - maxRevision = cur.Revision - } - } - - // Clean up duplicates - for _, cur := range dupRevisions { - if cur.Name == keepCur.Name { - continue - } - if err := s.controllerHistory.DeleteControllerRevision(cur); err != nil { - return nil, err - } - } - return keepCur, nil -} - -func maxRevision(revisions []*appsv1.ControllerRevision) int64 { - max := int64(0) - for _, revision := range revisions { - if revision.Revision > max { - max = revision.Revision - } - } - return max -} - -func getPatch(fedResource FederatedResource) ([]byte, error) { - // Create a patch of the fedResource that replaces spec.template.spec.template - template, ok, err := unstructured.NestedMap(fedResource.Object().Object, "spec", "template", "spec", "template") - if err != nil { - fedResource.RecordError( - string(fedtypesv1a1.SyncRevisionsFailed), - errors.Wrap(err, "Failed to get template.spec.template"), - ) - return nil, err - } - if !ok { - fedResource.RecordError( - string(fedtypesv1a1.SyncRevisionsFailed), - fmt.Errorf("failed to find template.spec.template"), - ) - return nil, fmt.Errorf( - "spec.template.spec.template is not found, fedResource: %+v", - fedResource.Object().Object, - ) - } - - patchObj := make(map[string]interface{}) - patchObj["op"] = "replace" - patchObj["path"] = "/spec/template/spec/template" - patchObj["value"] = template - return json.Marshal([]interface{}{patchObj}) -} - -func getPodTemplateHash(fedResource FederatedResource) (string, error) { - template, ok, err := unstructured.NestedMap(fedResource.Object().Object, "spec", "template", "spec", "template") - if err != nil { - return "", err - } - if !ok { - return "", fmt.Errorf("spec.template.spec.template is not found, fedResource: %+v", fedResource.Object().Object) - } - return history.HashObject(template), nil -} - -func revisionLabels(fedResource FederatedResource) map[string]string { - return map[string]string{ - "uid": string(fedResource.Object().GetUID()), - } -} - -func revisionLabelsWithOriginalLabel(fedResource FederatedResource) map[string]string { - ret := revisionLabels(fedResource) - labels := fedResource.Object().GetLabels() - for key, value := range labels { - ret[key] = value - } - return ret -} diff --git a/pkg/controllers/sync/placement.go b/pkg/controllers/sync/placement.go index ad85b233..a60889f1 100644 --- a/pkg/controllers/sync/placement.go +++ b/pkg/controllers/sync/placement.go @@ -22,94 +22,24 @@ are Copyright 2023 The KubeAdmiral Authors. 
package sync import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" ) -// computeNamespacedPlacement determines placement for namespaced -// federated resources (e.g. FederatedConfigMap). -// -// If KubeFed is deployed cluster-wide, placement is the intersection -// of the placement for the federated resource and the placement of -// the federated namespace containing the resource. -// -// If KubeFed is limited to a single namespace, placement is -// determined as the intersection of resource and namespace placement -// if namespace placement exists. If namespace placement does not -// exist, resource placement will be used verbatim. This is possible -// because the single namespace by definition must exist on member -// clusters, so namespace placement becomes a mechanism for limiting -// rather than allowing propagation. -func computeNamespacedPlacement( - resource, namespace *unstructured.Unstructured, - clusters []*fedcorev1a1.FederatedCluster, - limitedScope bool, -) (selectedClusters sets.String, err error) { - resourceClusters, err := computePlacement(resource, clusters) - if err != nil { - return nil, err - } - - if namespace == nil { - if limitedScope { - // Use the resource placement verbatim if no federated - // namespace is present and KubeFed is targeting a - // single namespace. - return resourceClusters, nil - } - // Resource should not exist in any member clusters. - return sets.String{}, nil - } - - namespaceClusters, err := computePlacement(namespace, clusters) - if err != nil { - return nil, err - } - - // If both namespace and resource placement exist, the desired - // list of clusters is their intersection. - return resourceClusters.Intersection(namespaceClusters), nil -} - // computePlacement determines the selected clusters for a federated // resource. 
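(Editor's note: a quick illustration of the generic-set semantics used by the function below, as a sketch with made-up cluster names; sets.New and Intersection come from k8s.io/apimachinery/pkg/util/sets:)

    // Selected clusters = placement union ∩ clusters known to the federation.
    placementUnion := sets.New("member-1", "member-2", "member-3") // e.g. resource.GetSpec().GetPlacementUnion()
    knownClusters := sets.New("member-2", "member-3", "member-4")  // e.g. getClusterNames(clusters)
    selected := knownClusters.Intersection(placementUnion)         // {member-2, member-3}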
func computePlacement( - resource *unstructured.Unstructured, + resource fedcorev1a1.GenericFederatedObject, clusters []*fedcorev1a1.FederatedCluster, -) (selectedClusters sets.String, err error) { - selectedNames, err := selectedClusterNames(resource, clusters) - if err != nil { - return nil, err - } +) (selectedClusters sets.Set[string]) { + selectedNames := resource.GetSpec().GetPlacementUnion() clusterNames := getClusterNames(clusters) - return clusterNames.Intersection(selectedNames), nil -} - -func selectedClusterNames( - resource *unstructured.Unstructured, - clusters []*fedcorev1a1.FederatedCluster, -) (sets.String, error) { - object, err := util.UnmarshalGenericPlacements(resource) - if err != nil { - return nil, err - } - - selectedNames := sets.String{} - - for _, placement := range object.Spec.Placements { - for cluster := range placement.Placement.ClusterNames() { - selectedNames.Insert(cluster) - } - } - - return selectedNames, nil + return clusterNames.Intersection(selectedNames) } -func getClusterNames(clusters []*fedcorev1a1.FederatedCluster) sets.String { - clusterNames := sets.String{} +func getClusterNames(clusters []*fedcorev1a1.FederatedCluster) sets.Set[string] { + clusterNames := sets.New[string]() for _, cluster := range clusters { clusterNames.Insert(cluster.Name) } diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 092d4634..550b7d40 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -27,7 +27,6 @@ import ( "encoding/hex" "fmt" "sort" - "strings" "sync" "github.com/pkg/errors" @@ -38,16 +37,13 @@ import ( "k8s.io/client-go/tools/record" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/dispatch" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/version" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - annotationutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/managedlabel" schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" ) // FederatedResource encapsulates the behavior of a logical federated @@ -57,94 +53,78 @@ type FederatedResource interface { dispatch.FederatedResourceForDispatch FederatedName() common.QualifiedName - FederatedKind() string - FederatedGVK() schema.GroupVersionKind - CollisionCount() *int32 - RevisionHistoryLimit() int64 UpdateVersions(selectedClusters []string, versionMap map[string]string) error DeleteVersions() - ComputePlacement(clusters []*fedcorev1a1.FederatedCluster) (selectedClusters sets.String, err error) - NamespaceNotFederated() bool + ComputePlacement(clusters []*fedcorev1a1.FederatedCluster) sets.Set[string] + SetObject(obj fedcorev1a1.GenericFederatedObject) } +var _ FederatedResource = &federatedResource{} + type federatedResource struct { sync.RWMutex - limitedScope bool - typeConfig *fedcorev1a1.FederatedTypeConfig - targetName common.QualifiedName - federatedKind string - federatedName common.QualifiedName - federatedResource *unstructured.Unstructured - versionManager *version.VersionManager - overridesMap util.OverridesMap 
- versionMap map[string]string - fedNamespace *unstructured.Unstructured - eventRecorder record.EventRecorder + typeConfig *fedcorev1a1.FederatedTypeConfig + federatedName common.QualifiedName + targetName common.QualifiedName + federatedObject fedcorev1a1.GenericFederatedObject + template *unstructured.Unstructured + versionManager *version.VersionManager + overridesMap util.OverridesMap + versionMap map[string]string + eventRecorder record.EventRecorder } func (r *federatedResource) FederatedName() common.QualifiedName { return r.federatedName } -func (r *federatedResource) FederatedKind() string { - return r.typeConfig.GetFederatedType().Kind -} - -func (r *federatedResource) FederatedGVK() schema.GroupVersionKind { - apiResource := r.typeConfig.GetFederatedType() - return schemautil.APIResourceToGVK(&apiResource) -} - func (r *federatedResource) TargetName() common.QualifiedName { return r.targetName } -func (r *federatedResource) TargetKind() string { - return r.typeConfig.GetTargetType().Kind +func (r *federatedResource) TargetGVK() schema.GroupVersionKind { + return r.typeConfig.GetSourceTypeGVK() } -func (r *federatedResource) TargetGVK() schema.GroupVersionKind { - apiResource := r.typeConfig.GetTargetType() - return schemautil.APIResourceToGVK(&apiResource) +func (r *federatedResource) TargetGVR() schema.GroupVersionResource { + return r.typeConfig.GetSourceTypeGVR() } func (r *federatedResource) TypeConfig() *fedcorev1a1.FederatedTypeConfig { return r.typeConfig } -func (r *federatedResource) Replicas() (*int64, error) { - return utilunstructured.GetInt64FromPath( - r.federatedResource, - r.typeConfig.Spec.PathDefinition.ReplicasSpec, - common.TemplatePath, - ) +func (r *federatedResource) Object() fedcorev1a1.GenericFederatedObject { + return r.federatedObject } -func (r *federatedResource) Object() *unstructured.Unstructured { - return r.federatedResource -} - -func (r *federatedResource) CollisionCount() *int32 { - val, _, _ := unstructured.NestedInt64(r.Object().Object, "status", "collisionCount") - v := int32(val) - return &v -} - -func (r *federatedResource) RevisionHistoryLimit() int64 { - val, _, _ := unstructured.NestedInt64(r.Object().Object, "spec", "revisionHistoryLimit") - return val +func (r *federatedResource) SetObject(obj fedcorev1a1.GenericFederatedObject) { + r.federatedObject = obj } func (r *federatedResource) TemplateVersion() (string, error) { - obj := r.federatedResource - return GetTemplateHash(obj.Object) + if hash, err := hashUnstructured(r.template); err != nil { + return "", fmt.Errorf("failed to hash template: %w", err) + } else { + return hash, nil + } } func (r *federatedResource) OverrideVersion() (string, error) { // TODO Consider hashing overrides per cluster to minimize // unnecessary updates. 
- return GetOverrideHash(r.federatedResource) + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "overrides": r.federatedObject.GetSpec().Overrides, + }, + } + + if hash, err := hashUnstructured(obj); err != nil { + return "", fmt.Errorf("failed to hash overrides: %w", err) + } else { + return hash, nil + } } func (r *federatedResource) VersionForCluster(clusterName string) (string, error) { @@ -168,30 +148,15 @@ func (r *federatedResource) DeleteVersions() { r.versionManager.Delete(r.federatedName) } -func (r *federatedResource) ComputePlacement(clusters []*fedcorev1a1.FederatedCluster) (sets.String, error) { - if r.typeConfig.GetNamespaced() { - return computeNamespacedPlacement(r.federatedResource, r.fedNamespace, clusters, r.limitedScope) - } - return computePlacement(r.federatedResource, clusters) -} - -func (r *federatedResource) NamespaceNotFederated() bool { - return r.typeConfig.GetNamespaced() && r.fedNamespace == nil +func (r *federatedResource) ComputePlacement(clusters []*fedcorev1a1.FederatedCluster) sets.Set[string] { + return computePlacement(r.federatedObject, clusters) } // TODO Marshall the template once per reconcile, not per-cluster func (r *federatedResource) ObjectForCluster(clusterName string) (*unstructured.Unstructured, error) { - templateBody, ok, err := unstructured.NestedMap(r.federatedResource.Object, common.SpecField, common.TemplateField) - if err != nil { - return nil, errors.Wrap(err, "Error retrieving template body") - } - if !ok { - // Some resources (like namespaces) can be created from an - // empty template. - templateBody = make(map[string]interface{}) - } - obj := &unstructured.Unstructured{Object: templateBody} + obj := r.template.DeepCopy() + // TODO: do we still need this? The template created by federate controller should never contain finalizers. notSupportedTemplate := "metadata.%s cannot be set via template to avoid conflicting with controllers " + "in member clusters. Consider using an override to add or remove elements from this collection." if len(obj.GetFinalizers()) > 0 { @@ -202,35 +167,12 @@ func (r *federatedResource) ObjectForCluster(clusterName string) (*unstructured. // Avoid having to duplicate these details in the template or have // the name/namespace vary between host and member clusters. // TODO: consider omitting these fields in the template created by federate controller - obj.SetName(r.federatedResource.GetName()) - namespace := util.NamespaceForCluster(clusterName, r.federatedResource.GetNamespace()) - obj.SetNamespace(namespace) - targetAPIResource := r.typeConfig.GetTargetType() - obj.SetKind(targetAPIResource.Kind) - - // deprecated: federated generation is currently unused and might increase the dispatcher load - // if _, err = annotationutil.AddAnnotation( - // obj, common.FederatedGenerationAnnotation, fmt.Sprintf("%d", r.Object().GetGeneration())); err != nil { - // return nil, err - // } + obj.SetName(r.federatedObject.GetName()) + obj.SetNamespace(r.federatedObject.GetNamespace()) - if _, err = annotationutil.AddAnnotation(obj, common.SourceGenerationAnnotation, fmt.Sprintf("%d", obj.GetGeneration())); err != nil { - return nil, err - } - - // If the template does not specify an api version, default it to - // the one configured for the target type in the FTC. 
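(Editor's note: the replacement code a few lines below drops this conditional defaulting and stamps apiVersion and kind unconditionally from the FTC's source type. For example, with a hypothetical apps/v1 source type and obj being the unstructured template from the surrounding function:)

    // schema is k8s.io/apimachinery/pkg/runtime/schema.
    gv := schema.GroupVersion{Group: "apps", Version: "v1"}
    obj.SetAPIVersion(gv.String()) // "apps/v1"
    obj.SetKind("Deployment")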
- if len(obj.GetAPIVersion()) == 0 { - obj.SetAPIVersion(schema.GroupVersion{Group: targetAPIResource.Group, Version: targetAPIResource.Version}.String()) - } - - // set current revision - revision, ok := r.Object().GetAnnotations()[common.CurrentRevisionAnnotation] - if ok { - if _, err := annotationutil.AddAnnotation(obj, common.CurrentRevisionAnnotation, revision); err != nil { - return nil, err - } - } + targetAPIResource := r.typeConfig.GetSourceType() + obj.SetAPIVersion(schema.GroupVersion{Group: targetAPIResource.Group, Version: targetAPIResource.Version}.String()) + obj.SetKind(targetAPIResource.Kind) if schemautil.IsJobGvk(r.TargetGVK()) { if err := dropJobFields(obj); err != nil { @@ -306,7 +248,6 @@ func dropPodFields(obj *unstructured.Unstructured) error { func (r *federatedResource) ApplyOverrides( obj *unstructured.Unstructured, clusterName string, - otherOverrides fedtypesv1a1.OverridePatches, ) error { overrides, err := r.overridesForCluster(clusterName) if err != nil { @@ -317,11 +258,6 @@ func (r *federatedResource) ApplyOverrides( return err } } - if len(otherOverrides) != 0 { - if err := util.ApplyJsonPatch(obj, otherOverrides); err != nil { - return err - } - } // Ensure that resources managed by KubeFed always have the // managed label. The label is intended to be targeted by all the @@ -340,17 +276,15 @@ func (r *federatedResource) RecordEvent(reason, messageFmt string, args ...inter r.eventRecorder.Eventf(r.Object(), corev1.EventTypeNormal, reason, messageFmt, args...) } -func (r *federatedResource) overridesForCluster(clusterName string) (fedtypesv1a1.OverridePatches, error) { +func (r *federatedResource) overridesForCluster(clusterName string) (fedcorev1a1.OverridePatches, error) { r.Lock() defer r.Unlock() if r.overridesMap == nil { - obj, err := util.UnmarshalGenericOverrides(r.federatedResource) - if err != nil { - return nil, fmt.Errorf("unmarshal cluster overrides: %w", err) + overrides := make([]fedcorev1a1.OverrideWithController, 0, len(r.federatedObject.GetSpec().Overrides)) + for _, o := range r.federatedObject.GetSpec().Overrides { + overrides = append(overrides, *o.DeepCopy()) } - r.overridesMap = make(util.OverridesMap) - // Order overrides based on the controller name specified in the FTC // Put overrides from unknown sources at the end, but preserve their relative orders controllerOrder := make(map[string]int) @@ -359,15 +293,16 @@ func (r *federatedResource) overridesForCluster(clusterName string) (fedtypesv1a controllerOrder[controller] = len(controllerOrder) } } - sort.SliceStable(obj.Spec.Overrides, func(i, j int) bool { - lhs, isKnown := controllerOrder[obj.Spec.Overrides[i].Controller] + + sort.SliceStable(overrides, func(i, j int) bool { + lhs, isKnown := controllerOrder[overrides[i].Controller] if !isKnown { // lhs is unknown // if rhs is known, return false so rhs can precede lhs // if rhs is unknown, return false to preserve the relative order return false } - rhs, isKnown := controllerOrder[obj.Spec.Overrides[j].Controller] + rhs, isKnown := controllerOrder[overrides[j].Controller] if !isKnown { // lhs controller is known and rhs controller is unknown // lhs should precede rhs @@ -376,11 +311,13 @@ func (r *federatedResource) overridesForCluster(clusterName string) (fedtypesv1a return lhs < rhs }) + r.overridesMap = make(util.OverridesMap) + // Merge overrides in the specified order - for _, controllerOverride := range obj.Spec.Overrides { - for _, clusterOverride := range controllerOverride.Clusters { - 
r.overridesMap[clusterOverride.ClusterName] = append( - r.overridesMap[clusterOverride.ClusterName], clusterOverride.Patches..., + for _, controllerOverride := range overrides { + for _, clusterOverride := range controllerOverride.Override { + r.overridesMap[clusterOverride.Cluster] = append( + r.overridesMap[clusterOverride.Cluster], clusterOverride.Patches..., ) } } @@ -388,83 +325,11 @@ func (r *federatedResource) overridesForCluster(clusterName string) (fedtypesv1a return r.overridesMap[clusterName], nil } -// FIXME: Since override operator is not limited to "replace" and there can be multiple patches affecting the same key, -// we can only determine the correct replicas overrides after all overrides have ben applied. -func (r *federatedResource) ReplicasOverrideForCluster(clusterName string) (int32, bool, error) { - overrides, err := r.overridesForCluster(clusterName) - if err != nil { - return 0, false, err - } - for _, o := range overrides { - if o.Path == "/spec/replicas" && o.Value != nil { - r, ok := o.Value.(float64) - if !ok { - return 0, false, errors.Errorf("failed to retrieve replicas override for %s", clusterName) - } - return int32(r), true, nil - } - } - - defaultReplicas, err := r.Replicas() - if err != nil { - return 0, false, err - } - if defaultReplicas == nil { - return 0, false, errors.Errorf("failed to retrieve replicas override for %s", clusterName) - } - return int32(*defaultReplicas), false, nil -} - -func (r *federatedResource) TotalReplicas(clusterNames sets.String) (int32, error) { - var replicas int32 - for clusterName := range clusterNames { - val, _, err := r.ReplicasOverrideForCluster(clusterName) - if err != nil { - return 0, err - } - replicas += val - } - return replicas, nil -} - -func GetTemplateHash(fieldMap map[string]interface{}) (string, error) { - fields := []string{common.SpecField, common.TemplateField} - fieldMap, ok, err := unstructured.NestedMap(fieldMap, fields...) - if err != nil { - return "", errors.Wrapf(err, "Error retrieving %q", strings.Join(fields, ".")) - } - if !ok { - return "", nil - } - obj := &unstructured.Unstructured{Object: fieldMap} - description := strings.Join(fields, ".") - return hashUnstructured(obj, description) -} - -func GetOverrideHash(rawObj *unstructured.Unstructured) (string, error) { - override := fedtypesv1a1.GenericObjectWithOverrides{} - err := util.UnstructuredToInterface(rawObj, &override) - if err != nil { - return "", errors.Wrap(err, "Error retrieving overrides") - } - if override.Spec == nil { - return "", nil - } - // Only hash the overrides - obj := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "overrides": override.Spec.Overrides, - }, - } - - return hashUnstructured(obj, "overrides") -} - // TODO Investigate alternate ways of computing the hash of a field map. -func hashUnstructured(obj *unstructured.Unstructured, description string) (string, error) { +func hashUnstructured(obj *unstructured.Unstructured) (string, error) { jsonBytes, err := obj.MarshalJSON() if err != nil { - return "", errors.Wrapf(err, "Failed to marshal %q to json", description) + return "", fmt.Errorf("failed to marshal to json: %w", err) } //nolint:gosec hash := md5.New() diff --git a/pkg/controllers/sync/status/status.go b/pkg/controllers/sync/status/status.go index ae082df7..5613fb89 100644 --- a/pkg/controllers/sync/status/status.go +++ b/pkg/controllers/sync/status/status.go @@ -22,21 +22,15 @@ are Copyright 2023 The KubeAdmiral Authors. 
package status import ( - "encoding/json" - "reflect" "sort" - "time" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) -type PropagationStatusMap map[string]fedtypesv1a1.PropagationStatus +type PropagationStatusMap map[string]fedcorev1a1.PropagationStatusType type CollectedPropagationStatus struct { StatusMap PropagationStatusMap @@ -48,48 +42,20 @@ type CollectedPropagationStatus struct { // federated resource's object map. Returns a boolean indication of // whether status should be written to the API. func SetFederatedStatus( - fedObject *unstructured.Unstructured, - collisionCount *int32, - reason fedtypesv1a1.AggregateReason, + fedObject fedcorev1a1.GenericFederatedObject, + reason fedcorev1a1.FederatedObjectConditionReason, collectedStatus CollectedPropagationStatus, -) (bool, error) { - resource := &fedtypesv1a1.GenericObjectWithStatus{} - err := util.UnstructuredToInterface(fedObject, resource) - if err != nil { - return false, errors.Wrapf(err, "Failed to unmarshall to generic resource") - } - if resource.Status == nil { - resource.Status = &fedtypesv1a1.GenericFederatedStatus{} - } - - changed := update(resource.Status, fedObject.GetGeneration(), collisionCount, reason, collectedStatus) - - if !changed { - return false, nil - } - - resourceJSON, err := json.Marshal(resource) - if err != nil { - return false, errors.Wrapf(err, "Failed to marshall generic status to json") - } - resourceObj := &unstructured.Unstructured{} - err = resourceObj.UnmarshalJSON(resourceJSON) - if err != nil { - return false, errors.Wrapf(err, "Failed to marshall generic resource json to unstructured") - } - fedObject.Object[common.StatusField] = resourceObj.Object[common.StatusField] - - return true, nil +) bool { + return update(fedObject.GetStatus(), fedObject.GetGeneration(), reason, collectedStatus) } // update ensures that the status reflects the given generation, reason // and collected status. Returns a boolean indication of whether the // status has been changed. func update( - s *fedtypesv1a1.GenericFederatedStatus, + s *fedcorev1a1.GenericFederatedObjectStatus, generation int64, - collisionCount *int32, - reason fedtypesv1a1.AggregateReason, + reason fedcorev1a1.FederatedObjectConditionReason, collectedStatus CollectedPropagationStatus, ) bool { generationUpdated := s.SyncedGeneration != generation @@ -97,32 +63,27 @@ func update( s.SyncedGeneration = generation } - collisionCountUpdated := !reflect.DeepEqual(s.CollisionCount, collisionCount) - if collisionCountUpdated { - s.CollisionCount = collisionCount - } + clustersChanged := setClusters(s, collectedStatus.StatusMap, collectedStatus.GenerationMap) + + // Indicate that changes were propagated if either status.clusters + // was changed or if existing resources were updated (which could + // occur even if status.clusters was unchanged). + changesPropagated := clustersChanged || len(collectedStatus.StatusMap) > 0 && collectedStatus.ResourcesUpdated // Identify whether one or more clusters could not be reconciled // successfully. 
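(Editor's note: since Go's && binds tighter than ||, the changesPropagated assignment above groups as in this equivalent sketch; the check below then downgrades the aggregate reason if any cluster reports a non-OK propagation status:)

    // Equivalent, fully parenthesized form of the expression added above.
    changesPropagated := clustersChanged ||
        (len(collectedStatus.StatusMap) > 0 && collectedStatus.ResourcesUpdated)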
- if reason == fedtypesv1a1.AggregateSuccess { + if reason == fedcorev1a1.AggregateSuccess { for _, value := range collectedStatus.StatusMap { - if value != fedtypesv1a1.ClusterPropagationOK { - reason = fedtypesv1a1.CheckClusters + if value != fedcorev1a1.ClusterPropagationOK { + reason = fedcorev1a1.CheckClusters break } } } - clustersChanged := setClusters(s, collectedStatus.StatusMap, collectedStatus.GenerationMap) - - // Indicate that changes were propagated if either status.clusters - // was changed or if existing resources were updated (which could - // occur even if status.clusters was unchanged). - changesPropagated := clustersChanged || len(collectedStatus.StatusMap) > 0 && collectedStatus.ResourcesUpdated - propStatusUpdated := setPropagationCondition(s, reason, changesPropagated) - statusUpdated := generationUpdated || collisionCountUpdated || propStatusUpdated + statusUpdated := generationUpdated || propStatusUpdated return statusUpdated } @@ -130,14 +91,14 @@ func update( // map and generation map. Returns a boolean indication of whether the // status.clusters was modified. func setClusters( - s *fedtypesv1a1.GenericFederatedStatus, + s *fedcorev1a1.GenericFederatedObjectStatus, statusMap PropagationStatusMap, generationMap map[string]int64, ) bool { if !clustersDiffers(s, statusMap, generationMap) { return false } - s.Clusters = []fedtypesv1a1.GenericClusterStatus{} + s.Clusters = []fedcorev1a1.PropagationStatus{} // Write status in ascending order of cluster names for better readability clusterNames := make([]string, 0, len(statusMap)) for clusterName := range statusMap { @@ -146,10 +107,10 @@ func setClusters( sort.Strings(clusterNames) for _, clusterName := range clusterNames { status := statusMap[clusterName] - s.Clusters = append(s.Clusters, fedtypesv1a1.GenericClusterStatus{ - Name: clusterName, - Status: status, - Generation: generationMap[clusterName], + s.Clusters = append(s.Clusters, fedcorev1a1.PropagationStatus{ + Cluster: clusterName, + Status: status, + LastObservedGeneration: generationMap[clusterName], }) } return true @@ -158,7 +119,7 @@ func setClusters( // clustersDiffers checks whether `status.clusters` differs from the // given status map and generation map. func clustersDiffers( - s *fedtypesv1a1.GenericFederatedStatus, + s *fedcorev1a1.GenericFederatedObjectStatus, statusMap PropagationStatusMap, generationMap map[string]int64, ) bool { @@ -169,10 +130,10 @@ func clustersDiffers( return true } for _, status := range s.Clusters { - if statusMap[status.Name] != status.Status { + if statusMap[status.Cluster] != status.Status { return true } - if generationMap[status.Name] != status.Generation { + if generationMap[status.Cluster] != status.LastObservedGeneration { return true } } @@ -182,23 +143,26 @@ func clustersDiffers( // setPropagationCondition ensures that the Propagation condition is // updated to reflect the given reason. The type of the condition is // derived from the reason (empty -> True, not empty -> False). -func setPropagationCondition(s *fedtypesv1a1.GenericFederatedStatus, reason fedtypesv1a1.AggregateReason, +func setPropagationCondition( + s *fedcorev1a1.GenericFederatedObjectStatus, + reason fedcorev1a1.FederatedObjectConditionReason, changesPropagated bool, ) bool { // Determine the appropriate status from the reason. 
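(Editor's note: one subtlety in the hunk that follows: Conditions is now a slice of values rather than pointers, so the code must take the address of the element in place, and re-take it after append because append may reallocate the backing array. A sketch of the pattern:)

    conds := []fedcorev1a1.GenericFederatedObjectCondition{
        {Type: fedcorev1a1.PropagationConditionType},
    }
    c := &conds[0]                  // pointer into the backing array
    c.Status = corev1.ConditionTrue // mutation is visible through conds[0]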
var newStatus corev1.ConditionStatus - if reason == fedtypesv1a1.AggregateSuccess { + if reason == fedcorev1a1.AggregateSuccess { newStatus = corev1.ConditionTrue } else { newStatus = corev1.ConditionFalse } if s.Conditions == nil { - s.Conditions = []*fedtypesv1a1.GenericCondition{} + s.Conditions = []fedcorev1a1.GenericFederatedObjectCondition{} } - var propCondition *fedtypesv1a1.GenericCondition - for _, condition := range s.Conditions { - if condition.Type == fedtypesv1a1.PropagationConditionType { + var propCondition *fedcorev1a1.GenericFederatedObjectCondition + for i := range s.Conditions { + condition := &s.Conditions[i] + if condition.Type == fedcorev1a1.PropagationConditionType { propCondition = condition break } @@ -206,13 +170,13 @@ func setPropagationCondition(s *fedtypesv1a1.GenericFederatedStatus, reason fedt newCondition := propCondition == nil if newCondition { - propCondition = &fedtypesv1a1.GenericCondition{ - Type: fedtypesv1a1.PropagationConditionType, - } - s.Conditions = append(s.Conditions, propCondition) + s.Conditions = append(s.Conditions, fedcorev1a1.GenericFederatedObjectCondition{ + Type: fedcorev1a1.PropagationConditionType, + }) + propCondition = &s.Conditions[len(s.Conditions)-1] } - now := time.Now().UTC().Format(time.RFC3339) + now := metav1.Now() transition := newCondition || !(propCondition.Status == newStatus && propCondition.Reason == reason) if transition { diff --git a/pkg/controllers/sync/status/status_test.go b/pkg/controllers/sync/status/status_test.go index f146c861..c31cf308 100644 --- a/pkg/controllers/sync/status/status_test.go +++ b/pkg/controllers/sync/status/status_test.go @@ -25,26 +25,25 @@ import ( corev1 "k8s.io/api/core/v1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) func TestGenericPropagationStatusUpdateChanged(t *testing.T) { testCases := map[string]struct { generation int64 - collisionCount *int32 - reason fedtypesv1a1.AggregateReason + reason fedcorev1a1.FederatedObjectConditionReason statusMap PropagationStatusMap resourcesUpdated bool expectedChanged bool }{ "No change in clusters indicates unchanged": { statusMap: PropagationStatusMap{ - "cluster1": fedtypesv1a1.ClusterPropagationOK, + "cluster1": fedcorev1a1.ClusterPropagationOK, }, }, "No change in clusters with update indicates changed": { statusMap: PropagationStatusMap{ - "cluster1": fedtypesv1a1.ClusterPropagationOK, + "cluster1": fedcorev1a1.ClusterPropagationOK, }, resourcesUpdated: true, expectedChanged: true, @@ -53,7 +52,7 @@ func TestGenericPropagationStatusUpdateChanged(t *testing.T) { expectedChanged: true, }, "Transition indicates changed": { - reason: fedtypesv1a1.NamespaceNotFederated, + reason: fedcorev1a1.ClusterRetrievalFailed, expectedChanged: true, }, "Changed generation indicates changed": { @@ -63,16 +62,16 @@ func TestGenericPropagationStatusUpdateChanged(t *testing.T) { } for testName, tc := range testCases { t.Run(testName, func(t *testing.T) { - propStatus := &fedtypesv1a1.GenericFederatedStatus{ - Clusters: []fedtypesv1a1.GenericClusterStatus{ + propStatus := &fedcorev1a1.GenericFederatedObjectStatus{ + Clusters: []fedcorev1a1.PropagationStatus{ { - Name: "cluster1", - Status: fedtypesv1a1.ClusterPropagationOK, + Cluster: "cluster1", + Status: fedcorev1a1.ClusterPropagationOK, }, }, - Conditions: []*fedtypesv1a1.GenericCondition{ + Conditions: []fedcorev1a1.GenericFederatedObjectCondition{ { - Type: fedtypesv1a1.PropagationConditionType, + Type: 
fedcorev1a1.PropagationConditionType,
Status: corev1.ConditionTrue,
},
},
}
@@ -82,7 +81,7 @@ func TestGenericPropagationStatusUpdateChanged(t *testing.T) {
ResourcesUpdated: tc.resourcesUpdated,
GenerationMap: map[string]int64{"cluster1": tc.generation},
}
- changed := update(propStatus, tc.generation, tc.collisionCount, tc.reason, collectedStatus)
+ changed := update(propStatus, tc.generation, tc.reason, collectedStatus)
if tc.expectedChanged != changed {
t.Fatalf("Expected changed to be %v, got %v", tc.expectedChanged, changed)
}
diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go
index 59468f7d..a531e4bf 100644
--- a/pkg/controllers/sync/version/manager.go
+++ b/pkg/controllers/sync/version/manager.go
@@ -23,8 +23,6 @@ package version
import (
"context"
- "fmt"
- "strings"
"sync"
"time"
@@ -32,7 +30,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@@ -49,7 +46,7 @@ import (
// implement to allow versions to be tracked by the VersionManager.
type VersionedResource interface {
FederatedName() common.QualifiedName
- Object() *unstructured.Unstructured
+ Object() fedcorev1a1.GenericFederatedObject
TemplateVersion() (string, error)
OverrideVersion() (string, error)
}
@@ -57,10 +54,6 @@ type VersionManager struct {
sync.RWMutex
- targetKind string
-
- federatedKind string
-
// Namespace to source propagated versions from
namespace string
@@ -70,6 +63,7 @@ type VersionManager struct {
versions map[string]runtimeclient.Object
+ // TODO: consider switching to a fedcorev1a1client.Interface or dynamic.Interface
client generic.Client
logger klog.Logger
@@ -79,16 +73,15 @@ func NewVersionManager(
logger klog.Logger,
client generic.Client,
namespaced bool,
- federatedKind, targetKind, namespace string,
+ namespace string,
) *VersionManager {
+ adapter := NewVersionAdapter(namespaced)
v := &VersionManager{
- logger: logger.WithValues("origin", "version-manager"),
- targetKind: targetKind,
- federatedKind: federatedKind,
- namespace: namespace,
- adapter: NewVersionAdapter(namespaced),
- versions: make(map[string]runtimeclient.Object),
- client: client,
+ logger: logger.WithValues("origin", "version-manager", "type-name", adapter.TypeName()),
+ namespace: namespace,
+ adapter: adapter,
+ versions: make(map[string]runtimeclient.Object),
+ client: client,
}
return v
@@ -150,8 +143,10 @@ func (m *VersionManager) Get(resource VersionedResource) (map[string]string, err
// Update ensures that the propagated version for the given versioned
// resource is recorded.
-func (m *VersionManager) Update(resource VersionedResource,
- selectedClusters []string, versionMap map[string]string,
+func (m *VersionManager) Update(
+ resource VersionedResource,
+ selectedClusters []string,
+ versionMap map[string]string,
) error {
templateVersion, err := resource.TemplateVersion()
if err != nil {
@@ -189,13 +184,13 @@ func (m *VersionManager) Update(resource VersionedResource,
if oldStatus != nil && util.PropagatedVersionStatusEquivalent(oldStatus, status) {
m.Unlock()
- m.logger.WithValues("type-name", m.adapter.TypeName(), "version-qualified-name", qualifiedName).
+ m.logger.WithValues("version-qualified-name", qualifiedName).
V(4).Info("No update necessary") return nil } if obj == nil { - ownerReference := ownerReferenceForUnstructured(resource.Object()) + ownerReference := ownerReferenceForFederatedObject(resource.Object()) obj = m.adapter.NewVersion(qualifiedName, ownerReference, status) m.versions[key] = obj } else { @@ -234,8 +229,7 @@ func (m *VersionManager) list(stopChan <-chan struct{}) (runtimeclient.ObjectLis versionList = m.adapter.NewListObject() err := m.client.List(context.TODO(), versionList, m.namespace) if err != nil { - m.logger.WithValues("federated-kind", m.federatedKind). - Error(err, "Failed to list propagated versions for federatedKind") + m.logger.Error(err, "Failed to list propagated versions") // Do not return the error to allow the operation to be retried. return false, nil } @@ -251,7 +245,6 @@ func (m *VersionManager) list(stopChan <-chan struct{}) (runtimeclient.ObjectLis // version manager should not be used in advance of HasSynced // returning true, locking is assumed to be unnecessary. func (m *VersionManager) load(versionList runtimeclient.ObjectList, stopChan <-chan struct{}) bool { - typePrefix := PropagatedVersionPrefix(m.targetKind) objs, err := meta.ExtractList(versionList) if err != nil { return false @@ -265,24 +258,19 @@ func (m *VersionManager) load(versionList runtimeclient.ObjectList, stopChan <-c } qualifiedName := common.NewQualifiedName(obj) - // Ignore propagated version for other types - if strings.HasPrefix(qualifiedName.Name, typePrefix) { - m.versions[qualifiedName.String()] = obj.(runtimeclient.Object) - } + m.versions[qualifiedName.String()] = obj.(runtimeclient.Object) } m.Lock() m.hasSynced = true m.Unlock() - m.logger.WithValues("federated-kind", m.federatedKind). - V(4).Info("Version manager for federatedKind synced") + m.logger.V(4).Info("Version manager synced") return true } // versionQualifiedName derives the qualified name of a version -// resource from the qualified name of a template or target resource. +// resource from the qualified name of a federated object. func (m *VersionManager) versionQualifiedName(qualifiedName common.QualifiedName) common.QualifiedName { - versionName := PropagatedVersionName(m.targetKind, qualifiedName.Name) - return common.QualifiedName{Name: versionName, Namespace: qualifiedName.Namespace} + return qualifiedName } // writeVersion serializes the current state of the named propagated @@ -296,7 +284,7 @@ func (m *VersionManager) versionQualifiedName(qualifiedName common.QualifiedName func (m *VersionManager) writeVersion(obj pkgruntime.Object, qualifiedName common.QualifiedName) error { key := qualifiedName.String() adapterType := m.adapter.TypeName() - keyedLogger := m.logger.WithValues("type-name", adapterType, "version-qualified-name", key) + keyedLogger := m.logger.WithValues("version-qualified-name", key) resourceVersion, err := getResourceVersion(obj) if err != nil { @@ -409,7 +397,7 @@ func (m *VersionManager) writeVersion(obj pkgruntime.Object, qualifiedName commo } func (m *VersionManager) getResourceVersionFromAPI(qualifiedName common.QualifiedName) (string, error) { - m.logger.WithValues("federated-kind", m.federatedKind, "version-qualified-name", qualifiedName). + m.logger.WithValues("version-qualified-name", qualifiedName). 
V(2).Info("Retrieving resourceVersion from the API") obj := m.adapter.NewObject() err := m.client.Get(context.TODO(), obj, qualifiedName.Namespace, qualifiedName.Name) @@ -436,7 +424,7 @@ func setResourceVersion(obj pkgruntime.Object, resourceVersion string) error { return nil } -func ownerReferenceForUnstructured(obj *unstructured.Unstructured) metav1.OwnerReference { +func ownerReferenceForFederatedObject(obj fedcorev1a1.GenericFederatedObject) metav1.OwnerReference { gvk := obj.GetObjectKind().GroupVersionKind() return metav1.OwnerReference{ APIVersion: gvk.GroupVersion().String(), @@ -446,8 +434,10 @@ func ownerReferenceForUnstructured(obj *unstructured.Unstructured) metav1.OwnerR } } -func updateClusterVersions(oldVersions []fedcorev1a1.ClusterObjectVersion, - newVersions map[string]string, selectedClusters []string, +func updateClusterVersions( + oldVersions []fedcorev1a1.ClusterObjectVersion, + newVersions map[string]string, + selectedClusters []string, ) []fedcorev1a1.ClusterObjectVersion { // Retain versions for selected clusters that were not changed selectedClusterSet := sets.NewString(selectedClusters...) @@ -478,11 +468,3 @@ func VersionMapToClusterVersions(versionMap map[string]string) []fedcorev1a1.Clu util.SortClusterVersions(clusterVersions) return clusterVersions } - -func PropagatedVersionName(kind, resourceName string) string { - return fmt.Sprintf("%s%s", PropagatedVersionPrefix(kind), resourceName) -} - -func PropagatedVersionPrefix(kind string) string { - return fmt.Sprintf("%s-", strings.ToLower(kind)) -} diff --git a/pkg/controllers/util/rolloutplan.go b/pkg/controllers/util/rolloutplan.go deleted file mode 100644 index e6bd057f..00000000 --- a/pkg/controllers/util/rolloutplan.go +++ /dev/null @@ -1,868 +0,0 @@ -//go:build exclude -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - intstrutil "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/klog/v2" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" -) - -const ( - ReplicaPath = "/spec/replicas" - MaxSurgePath = "/spec/strategy/rollingUpdate/maxSurge" - MaxUnavailablePath = "/spec/strategy/rollingUpdate/maxUnavailable" - Nil = "nil" -) - -var ( - MaxSurgePathSlice = []string{ - common.SpecField, - common.StrategyField, - common.RollingUpdateField, - common.MaxSurgeField, - } - MaxUnavailablePathSlice = []string{ - common.SpecField, - common.StrategyField, - common.RollingUpdateField, - common.MaxUnavailableField, - } -) - -type RolloutPlan struct { - Replicas *int32 - MaxSurge *int32 - MaxUnavailable *int32 - OnlyPatchReplicas bool -} - -func (p RolloutPlan) String() string { - r, s, u := Nil, Nil, Nil - if p.Replicas != nil { - r = fmt.Sprintf("%d", *p.Replicas) - } - if p.MaxSurge != nil { - s = fmt.Sprintf("%d", *p.MaxSurge) - } - if p.MaxUnavailable != nil { - u = fmt.Sprintf("%d", *p.MaxUnavailable) - } - return fmt.Sprintf("%s,%s,%s,%t", r, s, u, p.OnlyPatchReplicas) -} - -func (p RolloutPlan) toOverrides() fedtypesv1a1.OverridePatches { - overrides := fedtypesv1a1.OverridePatches{} - if p.Replicas != nil { - overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: ReplicaPath, Value: *p.Replicas}) - } - if p.MaxSurge != nil { - overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: MaxSurgePath, Value: *p.MaxSurge}) - } - if p.MaxUnavailable != nil { - overrides = append(overrides, fedtypesv1a1.OverridePatch{Path: MaxUnavailablePath, Value: *p.MaxUnavailable}) - } - return overrides -} - -func (p *RolloutPlan) correctFencepost(t *TargetInfo, defaultIsSurge bool) { - completed := t.UpdateCompleted() - isSurge := t.IsSurge() - flip := t.Flip(defaultIsSurge) - - if completed && !flip { - // If the new replica set is saturated, set maxSurge & maxUnavailable to the final value. 
- // If there are unavailable instances in the new replica set, they will be part of maxUnavailable - p.MaxSurge = nil - p.MaxUnavailable = nil - } else if *p.MaxSurge == 0 && *p.MaxUnavailable == 0 { - // Like deployment controller, we set one of them to one if both maxSurge & maxUnavailable is zero - var one int32 = 1 - if isSurge { - p.MaxSurge = &one - } else { - p.MaxUnavailable = &one - } - } -} - -type RolloutPlans map[string]*RolloutPlan - -func (r RolloutPlans) String() string { - var strs []string - for k, v := range r { - strs = append(strs, fmt.Sprintf("%s:%v", k, v)) - } - return strings.Join(strs, "; ") -} - -func (r RolloutPlans) GetRolloutOverrides(clusterName string) fedtypesv1a1.OverridePatches { - p, ok := r[clusterName] - if !ok { - return fedtypesv1a1.OverridePatches{} - } - return p.toOverrides() -} - -type Targets []*TargetInfo - -func (s Targets) CurrentReplicas() int32 { - var currentReplicas int32 - for _, t := range s { - currentReplicas += t.Status.Replicas - } - return currentReplicas -} - -func (s Targets) DesiredReplicas() int32 { - var desiredReplicas int32 - for _, t := range s { - desiredReplicas += t.DesiredReplicas - } - return desiredReplicas -} - -func (s Targets) AvailableReplicas() int32 { - var totalAvailable int32 - for _, t := range s { - totalAvailable += t.Status.AvailableReplicas - } - return totalAvailable -} - -func (s Targets) ActualReplicas() int32 { - var totalActual int32 - for _, t := range s { - totalActual += t.Status.ActualReplicas - } - return totalActual -} - -type TargetStatus struct { - Replicas int32 // dp.Spec.Replicas - ActualReplicas int32 // dp.Status.Replicas - AvailableReplicas int32 // dp.Status.AvailableReplicas - UpdatedReplicas int32 // latestreplicaset.kubeadmiral.io/replicas if it's up-to-date, else 0 - UpdatedAvailableReplicas int32 // latestreplicaset.kubeadmiral.io/available-replicas if it's up-to-date, else 0 - CurrentNewReplicas int32 // the replicas of new replicaset which belong to current deployment - CurrentNewAvailableReplicas int32 // the available replicas of new replicaset which belong to current deployment - Updated bool // whether pod template is up to date in current dp with which in fedDp - MaxSurge int32 // maxSurge in current dp - MaxUnavailable int32 // maxUnavailable in current dp -} - -type TargetInfo struct { - ClusterName string - Status TargetStatus - DesiredReplicas int32 -} - -func (t *TargetInfo) String() string { - return fmt.Sprintf("%s:%d->%d,%d/%d,%d/%d,%d/%d,%d,%d,%t", t.ClusterName, t.Status.Replicas, t.DesiredReplicas, - t.Status.UpdatedAvailableReplicas, t.Status.UpdatedReplicas, - t.Status.CurrentNewAvailableReplicas, t.Status.CurrentNewReplicas, - t.Status.AvailableReplicas, t.Status.ActualReplicas, - t.Status.MaxSurge, t.Status.MaxUnavailable, t.Status.Updated) -} - -func (t *TargetInfo) MaxSurge(maxSurge, leastSurge int32) (int32, int32) { - res := Int32Min(maxSurge+leastSurge, t.ReplicasToUpdate()) - if res < 0 { - res = 0 - } - more := res - leastSurge - // impossible in normal cases - // normalize to zero to get a more strict plan, try the best to correct the unexpected situation - if more < 0 { - more = 0 - } - if maxSurge < 0 && leastSurge > t.Status.MaxSurge && res > t.Status.MaxSurge { - res = t.Status.MaxSurge - } - return res, more -} - -func (t *TargetInfo) MaxUnavailable(maxUnavailable, leastUnavailable int32) (int32, int32) { - res := Int32Min(maxUnavailable+leastUnavailable, t.ReplicasToUpdatedAvailable()) - if res < 0 { - res = 0 - } - more := res - 
leastUnavailable - // impossible in normal cases - // normalize to zero to get a more strict plan, try the best to correct the unexpected situation - if more < 0 { - more = 0 - } - if maxUnavailable < 0 && leastUnavailable > t.Status.MaxUnavailable && res > t.Status.MaxUnavailable { - res = t.Status.MaxUnavailable - } - return res, more -} - -func (t *TargetInfo) MaxScaleOut(maxScaleOut, leastSurge int32) (int32, int32) { - res := Int32Min(maxScaleOut+leastSurge, t.DesiredReplicas-t.Status.Replicas) - if res < 0 { - res = 0 - } - more := res - leastSurge - if more < 0 { - more = 0 - } - return res, more -} - -func (t *TargetInfo) MaxScaleIn(maxScaleIn, leastUnavailable int32) (int32, int32) { - res := Int32Min(maxScaleIn+leastUnavailable, t.Status.Replicas-t.DesiredReplicas) - // impossible - if res > t.Status.Replicas { - res = t.Status.Replicas - } - if res < 0 { - res = 0 - } - more := res - leastUnavailable - if more < 0 { - more = 0 - } - return res, more -} - -func (t *TargetInfo) LeastSurge() int32 { - res := t.Status.ActualReplicas - t.Status.Replicas - if res < 0 { - res = 0 - } - if !t.DuringUpdating() { - return res - } - return Int32Max(res, Int32Min(t.Status.MaxSurge, res+t.ReplicasToUpdateCurrently())) -} - -func (t *TargetInfo) LeastUnavailable() int32 { - res := t.Status.Replicas - t.Status.AvailableReplicas - if res < 0 { - res = 0 - } - if !t.DuringUpdating() { - return res - } - return Int32Max(res, Int32Min(t.Status.MaxUnavailable, t.ReplicasToUpdatedAvailableCurrently())) -} - -func (t *TargetInfo) ReplicasToUpdate() int32 { - res := t.Status.Replicas - t.Status.UpdatedReplicas - if res < 0 { - res = 0 - } - return res -} - -func (t *TargetInfo) ReplicasToUpdatedAvailable() int32 { - res := t.Status.Replicas - t.Status.UpdatedAvailableReplicas - if res < 0 { - res = 0 - } - return res -} - -func (t *TargetInfo) ReplicasToUpdateCurrently() int32 { - res := t.Status.Replicas - t.Status.CurrentNewReplicas - if res < 0 { - res = 0 - } - return res -} - -func (t *TargetInfo) ReplicasToUpdatedAvailableCurrently() int32 { - res := t.Status.Replicas - t.Status.CurrentNewAvailableReplicas - if res < 0 { - res = 0 - } - return res -} - -func (t *TargetInfo) DuringUpdating() bool { - // todo: only return t.Status.CurrentNewReplicas < t.Status.Replicas after we get the real currentNewReplicas - if t.Status.CurrentNewReplicas < t.Status.Replicas { - return true - } - if t.Status.Updated && t.ReplicasToUpdate() > 0 { - return true - } - return false -} - -func (t *TargetInfo) UpdateCompleted() bool { - return t.ReplicasToUpdate() == 0 -} - -func (t *TargetInfo) IsSurge() bool { - return t.Status.MaxSurge != 0 && t.Status.MaxUnavailable == 0 -} - -func (t *TargetInfo) Flip(defaultIsSurge bool) bool { - // a temporary fix to avoid unexpected flipping - // todo: avoiding this nasty judgment by restricting the replicas changes to be used only for scaling - return t.IsSurge() && !defaultIsSurge && t.ReplicasToUpdatedAvailable() > 0 -} - -func (t *TargetInfo) SkipPlanForUpdate(maxSurge, maxUnavailable int32) bool { - return maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() && t.LeastSurge() <= 0 && - t.LeastUnavailable() <= 0 -} - -func (t *TargetInfo) SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable int32) bool { - if maxSurge <= 0 && maxUnavailable <= 0 && !t.Status.Updated && !t.DuringUpdating() { - if leastUnavailable > 0 { - return false - } - leastSurge := t.LeastSurge() - if t.DesiredReplicas < t.Status.Replicas { - 
leastSurge = 0 - } - if leastSurge > 0 { - return false - } - return true - } - return false -} - -func (t *TargetInfo) SkipPlanForScaleIn(maxUnavailable int32) bool { - return maxUnavailable <= 0 && t.LeastUnavailable() <= 0 -} - -func (t *TargetInfo) SkipPlanForScaleOut(maxSurge int32) bool { - return maxSurge <= 0 && t.LeastSurge() <= 0 -} - -func unstructuredObjToTargetInfo(clusterName string, unstructuredObj *unstructured.Unstructured, desiredReplicas int32, - desiredRevision string, typeConfig *fedcorev1a1.FederatedTypeConfig, -) (*TargetInfo, error) { - if unstructuredObj == nil { - return &TargetInfo{ - ClusterName: clusterName, - DesiredReplicas: desiredReplicas, - }, nil - } - - replicas, err := utilunstructured.GetInt64FromPath(unstructuredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) - if err != nil || replicas == nil { - return nil, errors.Errorf("failed to retrieve replicas, err: %v", err) - } - maxSurge, maxUnavailable, err := RetrieveFencepost( - unstructuredObj, - MaxSurgePathSlice, - MaxUnavailablePathSlice, - int32(*replicas), - ) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve fencepost") - } - revision, ok := unstructuredObj.GetAnnotations()[common.CurrentRevisionAnnotation] - if !ok { - return nil, errors.Errorf("failed to retrieve annotation %s", common.CurrentRevisionAnnotation) - } - // consider it has been updated as long as the template is updated. We don't wait for the refresh of - // latestreplicaset annotations since the latency due to asynchronous updates may bring some problems - updated := revision == desiredRevision - currentNewReplicas, currentNewAvailableReplicas, err := retrieveNewReplicaSetInfo(unstructuredObj) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve new replicaSet info") - } - - updatedReplicas, updatedAvailableReplicas := currentNewReplicas, currentNewAvailableReplicas - if !updated { - updatedReplicas, updatedAvailableReplicas = 0, 0 - } - - actualReplicasOption, err := utilunstructured.GetInt64FromPath( - unstructuredObj, - typeConfig.Spec.PathDefinition.ReplicasSpec, - nil, - ) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve actual replicas") - } - var actualReplicas int32 - if actualReplicasOption != nil { - actualReplicas = int32(*actualReplicasOption) - } - - availableReplicasOption, err := utilunstructured.GetInt64FromPath( - unstructuredObj, - typeConfig.Spec.PathDefinition.AvailableReplicasStatus, - nil, - ) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve actual available replicas") - } - var availableReplicas int32 - if availableReplicasOption != nil { - availableReplicas = int32(*availableReplicasOption) - } - - t := &TargetInfo{ - ClusterName: clusterName, - Status: TargetStatus{ - Replicas: int32(*replicas), - ActualReplicas: actualReplicas, - AvailableReplicas: availableReplicas, - UpdatedReplicas: updatedReplicas, - UpdatedAvailableReplicas: updatedAvailableReplicas, - CurrentNewReplicas: currentNewReplicas, - CurrentNewAvailableReplicas: currentNewAvailableReplicas, - Updated: updated, - MaxSurge: maxSurge, - MaxUnavailable: maxUnavailable, - }, - DesiredReplicas: desiredReplicas, - } - return t, nil -} - -type RolloutPlanner struct { - typeConfig *fedcorev1a1.FederatedTypeConfig - Key string - Targets Targets - MaxSurge int32 - MaxUnavailable int32 - Replicas int32 - Revision string -} - -func NewRolloutPlanner( - key string, - typeConfig *fedcorev1a1.FederatedTypeConfig, - federatedResource *unstructured.Unstructured, - replicas 
int32, -) (*RolloutPlanner, error) { - pathPrefix := []string{common.SpecField, common.TemplateField} - maxSurgePath := append(pathPrefix, MaxSurgePathSlice...) - maxUnavailablePath := append(pathPrefix, MaxUnavailablePathSlice...) - maxSurge, maxUnavailable, err := RetrieveFencepost(federatedResource, maxSurgePath, maxUnavailablePath, replicas) - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve maxSurge or maxUnavailable from federated resource") - } - desiredRevision, ok := federatedResource.GetAnnotations()[common.CurrentRevisionAnnotation] - if !ok { - return nil, errors.Errorf( - "failed to retrieve annotation %s from federated resource", - common.CurrentRevisionAnnotation, - ) - } - return &RolloutPlanner{ - typeConfig: typeConfig, - Key: key, - MaxSurge: maxSurge, - MaxUnavailable: maxUnavailable, - Replicas: replicas, - Revision: desiredRevision, - }, nil -} - -func (p *RolloutPlanner) RegisterTarget( - clusterName string, - targetObj *unstructured.Unstructured, - desiredReplicas int32, -) error { - t, err := unstructuredObjToTargetInfo(clusterName, targetObj, desiredReplicas, p.Revision, p.typeConfig) - if err != nil { - return err - } - p.Targets = append(p.Targets, t) - return nil -} - -func (p *RolloutPlanner) IsScalingEvent() bool { - _, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets) - // create / scale out / scale in - if len(targetsToScaleOut) != 0 && len(targetsToScaleIn) != 0 { - return false - } - if len(targetsToScaleOut) == 0 && len(targetsToScaleIn) == 0 { - return false - } - for _, t := range p.Targets { - if !t.UpdateCompleted() { - return false - } - if t.Flip(p.IsSurge()) { - return false - } - } - return true -} - -func (p *RolloutPlanner) PlanScale() RolloutPlans { - plans := make(map[string]*RolloutPlan) - for _, t := range p.Targets { - plans[t.ClusterName] = &RolloutPlan{} - } - return plans -} - -func (p *RolloutPlanner) String() string { - var ts []string - for _, t := range p.Targets { - ts = append(ts, fmt.Sprintf("%v", t)) - } - return fmt.Sprintf("%s[%d,%d,%d,%s]: %v", - p.Key, p.Replicas, p.MaxSurge, p.MaxUnavailable, p.Revision, strings.Join(ts, "; ")) -} - -func (p *RolloutPlanner) RemainingMaxSurge() int32 { - // maxSurge := p.Replicas + p.MaxSurge - p.Targets.ActualReplicas() - // maxSurge := p.MaxSurge - (p.Targets.ActualReplicas() - p.Replicas) - var replicas, occupied int32 - for _, t := range p.Targets { - replicas += t.Status.Replicas - occupied += t.LeastSurge() - } - return p.MaxSurge - (replicas - p.Replicas) - occupied -} - -func (p *RolloutPlanner) RemainingMaxUnavailable() int32 { - // maxUnavailable := p.Targets.AvailableReplicas() - (p.Replicas - p.MaxUnavailable) - // maxUnavailable := p.MaxUnavailable - (p.Replicas - p.Targets.AvailableReplicas()) - var replicas, occupied int32 - for _, t := range p.Targets { - replicas += t.Status.Replicas - occupied += t.LeastUnavailable() - } - return p.MaxUnavailable - (p.Replicas - replicas) - occupied -} - -func (p *RolloutPlanner) IsSurge() bool { - return p.MaxSurge != 0 && p.MaxUnavailable == 0 -} - -// ComputeRolloutPlans compute maxUnavailable, maxSurge, replicas during rollout process. It returns a map that -// contains all the targets which are planned according to current status. Nil in a plan means the corresponding field -// won't be overridden by the rollout plan and should be set with the original value. If there's no plan for a target, -// it means "don't rollout it, it should wait for it's turn". 
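(Editor's note: for the record, since this file is being deleted: the doc comment above describes plan semantics where a nil field means "keep the original value" and a missing cluster entry means "hold the rollout for that cluster this round". A hypothetical example built from the types in this file:)

    replicas, surge := int32(5), int32(1)
    plans := RolloutPlans{
        "member-1": &RolloutPlan{Replicas: &replicas, MaxSurge: &surge}, // override these fields only
        "member-2": &RolloutPlan{},                                      // planned, but no overrides
        // "member-3" omitted: not planned this round; it waits its turn
    }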
-func (p *RolloutPlanner) Plan() RolloutPlans { - targetsToUpdate, targetsToScaleOut, targetsToScaleIn := sortTargets(p.Targets) - plans := make(map[string]*RolloutPlan) - - if p.IsScalingEvent() { - return p.PlanScale() - } - - // the remaining maxSurge & maxUnavailable that can be dispatched to deployments. If there are clusters that are - // not ready, or that we failed to get deployment from, the maxSurge/maxUnavailble will be increased/decreased - maxSurge, maxUnavailable := p.RemainingMaxSurge(), p.RemainingMaxUnavailable() - - // execution sequence (try to upgrade before scale out and scale in before upgrade): - // 1. upgrade targets waiting to be scaled out - // 2. scale in targets waiting to be scaled in - // 3. upgrade targets that only need to be upgraded - // 4. scale out targets waiting to be scaled out - // 5. upgrade targets waiting to be scaled in - for _, t := range targetsToScaleOut { - if t.SkipPlanForUpdate(maxSurge, maxUnavailable) { - continue - } - s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) - u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable()) - maxSurge -= sm - maxUnavailable -= um - r := t.Status.Replicas - plan := &RolloutPlan{Replicas: &r} - plan.MaxSurge = &s - plan.MaxUnavailable = &u - plan.correctFencepost(t, p.IsSurge()) - plans[t.ClusterName] = plan - } - - for _, t := range targetsToScaleIn { - if t.SkipPlanForScaleIn(maxUnavailable) { - continue - } - // we tend to scale in those that are already unavailable - leastUnavailable := t.LeastUnavailable() - if t.DuringUpdating() { - // if it' during updating (for example, the maxUnavailable is enough for scale in and updating coming next, - // so we set the replica and maxUnavailable; but a fed weight adjusting followed so we have to scale in again - // even though it's being updated), scaling will be performed proportionally and may not cover the - // unavailable instances as expected. 
- leastUnavailable = 0 - } - scale, more := t.MaxScaleIn(maxUnavailable, leastUnavailable) - maxUnavailable -= more - plan := &RolloutPlan{OnlyPatchReplicas: true} - r := t.Status.Replicas - scale - plan.Replicas = &r - plans[t.ClusterName] = plan - } - - for _, t := range targetsToUpdate { - if t.SkipPlanForUpdate(maxSurge, maxUnavailable) { - continue - } - s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) - u, um := t.MaxUnavailable(maxUnavailable, t.LeastUnavailable()) - maxSurge -= sm - maxUnavailable -= um - plan := &RolloutPlan{} - plan.MaxSurge = &s - plan.MaxUnavailable = &u - plan.correctFencepost(t, p.IsSurge()) - plans[t.ClusterName] = plan - } - - for _, t := range targetsToScaleOut { - if t.SkipPlanForScaleOut(maxSurge) { - continue - } - // make sure new rs exists to avoid too much unnecessary work - if !t.Status.Updated && t.Status.Replicas != 0 { - continue - } - leastSurge := t.LeastSurge() - if t.DuringUpdating() { - leastSurge = 0 - } - scale, more := t.MaxScaleOut(maxSurge, leastSurge) - maxSurge -= more - plan, ok := plans[t.ClusterName] - if !ok || plan == nil { - plan = &RolloutPlan{} - } - r := t.Status.Replicas + scale - plan.Replicas = &r - plans[t.ClusterName] = plan - } - - for _, t := range targetsToScaleIn { - plan, ok := plans[t.ClusterName] - if !ok || plan == nil { - r := t.Status.Replicas - plan = &RolloutPlan{Replicas: &r} - } - // we have already scale in some unavailable instances in the second step, exclude them - leastUnavailable := t.LeastUnavailable() - if !t.DuringUpdating() { - leastUnavailable -= t.Status.Replicas - *plan.Replicas - if leastUnavailable < 0 { - leastUnavailable = 0 - } - } - if t.SkipPlanForUpdateForThoseToScaleIn(maxSurge, maxUnavailable, leastUnavailable) { - continue - } - - plan.OnlyPatchReplicas = false - s, sm := t.MaxSurge(maxSurge, t.LeastSurge()) - u, um := t.MaxUnavailable(maxUnavailable, leastUnavailable) - maxSurge -= sm - maxUnavailable -= um - plan.MaxSurge = &s - plan.MaxUnavailable = &u - plan.correctFencepost(t, p.IsSurge()) - plans[t.ClusterName] = plan - } - if err := validatePlans(p, plans); err != nil { - klog.Errorf("Failed to generate rollout plan for %s: %v. 
Current status: %s", p.Key, err, p) - return RolloutPlans{} - } - return plans -} - -func sortTargets(targets []*TargetInfo) ([]*TargetInfo, []*TargetInfo, []*TargetInfo) { - // sort the list to first update the targets that are already in update process - sort.Slice(targets, func(i, j int) bool { - return targets[i].ClusterName < targets[j].ClusterName - }) - var toUpdate, toScaleOut, toScaleIn []*TargetInfo - for _, t := range targets { - change := t.DesiredReplicas - t.Status.Replicas - switch { - case change < 0: - toScaleIn = append(toScaleIn, t) - case change > 0: - toScaleOut = append(toScaleOut, t) - default: - toUpdate = append(toUpdate, t) - } - } - return toUpdate, toScaleOut, toScaleIn -} - -func Int32Min(a, b int32) int32 { - if b < a { - return b - } - return a -} - -func Int32Max(a, b int32) int32 { - if b > a { - return b - } - return a -} - -func resolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { - surge, err := intstrutil.GetValueFromIntOrPercent( - intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), - int(desired), - true, - ) - if err != nil { - return 0, 0, err - } - unavailable, err := intstrutil.GetValueFromIntOrPercent( - intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), - int(desired), - false, - ) - if err != nil { - return 0, 0, err - } - - if surge == 0 && unavailable == 0 { - // Validation should never allow the user to explicitly use zero values for both maxSurge - // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. - // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the - // theory that surge might not work due to quota. - unavailable = 1 - } - - return int32(surge), int32(unavailable), nil -} - -func RetrieveFencepost(unstructuredObj *unstructured.Unstructured, maxSurgePath []string, maxUnavailablePath []string, - replicas int32, -) (int32, int32, error) { - var maxSurge, maxUnavailable *intstrutil.IntOrString - if ms, ok, err := unstructured.NestedString(unstructuredObj.Object, maxSurgePath...); ok && err == nil { - maxSurge = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: ms} - } else { - if ms, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxSurgePath...); ok && err2 == nil { - maxSurge = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(ms)} - } else { - klog.V(4).Infof("Failed to retrieve maxSurge from %s/%s: %v, %v", - unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2) - } - } - if mu, ok, err := unstructured.NestedString(unstructuredObj.Object, maxUnavailablePath...); ok && err == nil { - maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.String, StrVal: mu} - } else { - if mu, ok, err2 := unstructured.NestedInt64(unstructuredObj.Object, maxUnavailablePath...); ok && err2 == nil { - maxUnavailable = &intstrutil.IntOrString{Type: intstrutil.Int, IntVal: int32(mu)} - } else { - klog.V(4).Infof("Failed to retrieve maxUnavailable from %s/%s: %v, %v", - unstructuredObj.GetNamespace(), unstructuredObj.GetName(), err, err2) - } - } - - ms, mu, err := resolveFenceposts(maxSurge, maxUnavailable, replicas) - if err != nil { - return 0, 0, err - } - if ms < 0 { - ms = 0 - } - if mu < 0 { - mu = 0 - } - return ms, mu, nil -} - -func retrieveNewReplicaSetInfo(unstructuredObj *unstructured.Unstructured) (int32, int32, error) { - ann, ok := unstructuredObj.GetAnnotations()[LatestReplicasetReplicasAnnotation] - if !ok || ann == "" { - return 0, 0, 
errors.Errorf("missing annotation %s", LatestReplicasetReplicasAnnotation) - } - replicas, err := strconv.ParseInt(ann, 10, 32) - if err != nil { - return 0, 0, err - } - ann, ok = unstructuredObj.GetAnnotations()[LatestReplicasetAvailableReplicasAnnotation] - if !ok || ann == "" { - return 0, 0, errors.Errorf( - "missing annotation %s", LatestReplicasetAvailableReplicasAnnotation) - } - availableReplicas, err := strconv.ParseInt(ann, 10, 32) - if err != nil { - return 0, 0, err - } - // todo: make sure the latestreplicaset annotations describe the current pod template of deployment - // a simple way to tell if the latestreplicaset annotations is up to date with current deployment. - lastRsName, lastRsNameExists := unstructuredObj.GetAnnotations()[common.LastReplicasetName] - rsName, rsNameExists := unstructuredObj.GetAnnotations()[LatestReplicasetNameAnnotation] - if !rsNameExists { - return 0, 0, errors.Errorf("missing annotation %s", LatestReplicasetNameAnnotation) - } - rsNameOutdated := lastRsNameExists && rsNameExists && lastRsName == rsName - if rsNameOutdated { - // paused=true may also result in this situation - replicas, availableReplicas = 0, 0 - } - return int32(replicas), int32(availableReplicas), nil -} - -func validatePlans(p *RolloutPlanner, plans RolloutPlans) error { - var planned, desired, current, maxUnavailable int32 - for _, t := range p.Targets { - desired += t.DesiredReplicas - cluster := t.ClusterName - r := t.Status.Replicas - current += r - if p, ok := plans[cluster]; ok { - if p == nil { - return errors.Errorf("invalid plan for %s: %v", cluster, p) - } - if p.Replicas != nil { - r = *p.Replicas - } else { - r = t.DesiredReplicas - } - if p.MaxUnavailable != nil { - if p.MaxSurge == nil || *p.MaxSurge != 0 || *p.MaxUnavailable != 1 { - maxUnavailable += *p.MaxUnavailable - } - } - } - planned += r - } - if p.Replicas-desired > p.MaxUnavailable { - return errors.Errorf("desired replicas deviates too much from the initial replicas, maybe some " + - "clusters are not ready") - } - l, h := desired, current - if desired > current { - l, h = current, desired - } - if l-planned > p.MaxUnavailable || planned-h > p.MaxSurge { - return errors.Errorf("invalid plan: %v", plans) - } - return nil -} diff --git a/pkg/controllers/util/rolloutplan_test.go b/pkg/controllers/util/rolloutplan_test.go deleted file mode 100644 index 12b2b0f1..00000000 --- a/pkg/controllers/util/rolloutplan_test.go +++ /dev/null @@ -1,1452 +0,0 @@ -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "reflect" - "testing" -) - -type PlanTestSuit struct { - Name string - Targets Targets - Plans RolloutPlans - MaxSurge int32 - MaxUnavailable int32 - Replicas int32 -} - -func ptr(i int32) *int32 { - return &i -} - -func newTarget( - name string, - updated bool, - replicas, - desiredReplicas, - updatedReplicas, - updatedAvailableReplicas, - ms, - mu int32, -) *TargetInfo { - currentNewReplicas := updatedReplicas - if !updated { - currentNewReplicas = replicas - } - currentNewAvailableReplicas := currentNewReplicas - return &TargetInfo{ - name, - TargetStatus{ - replicas, - replicas, - replicas, - updatedReplicas, - updatedAvailableReplicas, - currentNewReplicas, - currentNewAvailableReplicas, - updated, - ms, - mu, - }, - desiredReplicas, - } -} - -func newTargetWithActualInfo( - name string, - updated bool, - replicas, - desiredReplicas, - updatedReplicas, - updatedAvailableReplicas, - actualReplicas, - availableReplicas, - ms, - mu int32, -) *TargetInfo { - currentNewReplicas := updatedReplicas - if !updated { - currentNewReplicas = replicas - } - currentNewAvailableReplicas := currentNewReplicas - return &TargetInfo{ - name, - TargetStatus{ - replicas, - actualReplicas, - availableReplicas, - updatedReplicas, - updatedAvailableReplicas, - currentNewReplicas, - currentNewAvailableReplicas, - updated, - ms, - mu, - }, - desiredReplicas, - } -} - -func newTargetWithAllInfo( - name string, - updated bool, - replicas, - desiredReplicas, - updatedReplicas, - updatedAvailableReplicas, - currentNewReplicas, - currentNewAvailableReplicas, - actualReplicas, - availableReplicas, - ms, - mu int32, -) *TargetInfo { - return &TargetInfo{ - name, - TargetStatus{ - replicas, - actualReplicas, - availableReplicas, - updatedReplicas, - updatedAvailableReplicas, - currentNewReplicas, - currentNewAvailableReplicas, - updated, - ms, - mu, - }, - desiredReplicas, - } -} - -func TestPlanWholeProcessWithMaxUnavailable(t *testing.T) { - var replicas, maxSurge, maxUnavailable int32 = 45, 0, 10 - var tests []PlanTestSuit - s := PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 1" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", false, 10, 10, 0, 0, 0, 0), - newTarget("c", false, 0, 5, 0, 0, 0, 0), - newTarget("a", false, 5, 15, 0, 0, 0, 0), - newTarget("b", false, 20, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), ptr(0), ptr(5), false}, - "c": {ptr(0), nil, nil, false}, - "b": {ptr(15), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 2" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", false, 10, 10, 0, 0, 0, 0), - newTarget("c", true, 0, 5, 0, 0, 0, 10), - newTarget("a", true, 5, 15, 5, 5, 0, 5), - newTarget("b", false, 15, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "b": {ptr(10), nil, nil, true}, - "d": {nil, ptr(5), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 3" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", true, 10, 10, 5, 5, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 0, 10), - newTarget("a", true, 5, 15, 5, 5, 0, 10), - 
newTarget("b", false, 10, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "d": {nil, ptr(5), ptr(0), false}, - "b": {nil, ptr(5), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 4" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", true, 10, 10, 10, 9, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 0, 10), - newTarget("a", true, 5, 15, 5, 5, 0, 10), - newTarget("b", true, 10, 10, 5, 5, 5, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "b": {nil, ptr(5), ptr(0), false}, - "d": {nil, ptr(1), ptr(0), false}, - "f": {nil, ptr(5), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 5" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 3, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 0, 10), - newTarget("c", true, 0, 5, 0, 0, 0, 10), - newTarget("a", true, 5, 15, 5, 5, 0, 10), - newTarget("b", true, 10, 10, 10, 8, 5, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(15), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "b": {nil, ptr(1), ptr(0), false}, - "d": {nil, nil, nil, false}, - "f": {nil, ptr(1), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 6" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 0, 10), - newTarget("d", true, 10, 10, 10, 10, 0, 10), - newTarget("c", true, 0, 5, 0, 0, 0, 10), - newTarget("a", true, 15, 15, 15, 12, 0, 10), - newTarget("b", true, 10, 10, 10, 10, 0, 10), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "c": {ptr(0), nil, nil, false}, - "e": {ptr(0), ptr(0), ptr(2), false}, - "a": {nil, nil, nil, false}, - "b": {nil, nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 7" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 0, 10), - newTarget("d", true, 10, 10, 10, 10, 0, 10), - newTarget("c", true, 0, 5, 0, 0, 0, 10), - newTarget("a", true, 15, 15, 15, 14, 0, 1), - newTarget("b", true, 10, 10, 10, 10, 0, 10), - } - s.Plans = RolloutPlans{ - "c": {nil, nil, nil, false}, - "a": {nil, nil, nil, false}, - "b": {nil, nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 8" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 0, 10), - newTarget("d", true, 10, 10, 10, 10, 0, 10), - newTarget("c", true, 5, 5, 5, 5, 0, 10), - newTarget("a", true, 15, 15, 15, 15, 0, 1), - newTarget("b", true, 10, 10, 10, 10, 0, 10), - } - s.Plans = RolloutPlans{ - "a": {nil, nil, nil, false}, - "b": {nil, nil, nil, false}, - "c": {nil, nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{Targets: test.Targets, MaxSurge: test.MaxSurge, MaxUnavailable: 
test.MaxUnavailable, Replicas: test.Replicas} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", test.Name, got, test.Plans) - } - }) - } -} - -func TestPlanWholeProcessWithBoth(t *testing.T) { - var replicas, maxSurge, maxUnavailable int32 = 45, 5, 10 - var tests []PlanTestSuit - - s := PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 1" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", false, 10, 10, 0, 0, 0, 0), - newTarget("c", false, 0, 5, 0, 0, 0, 0), - newTarget("a", false, 5, 15, 0, 0, 0, 0), - newTarget("b", false, 20, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), ptr(5), ptr(5), false}, - "c": {ptr(0), nil, nil, false}, - "b": {ptr(15), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 2" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", false, 10, 10, 0, 0, 0, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 10), - newTarget("a", true, 5, 15, 5, 5, 5, 5), - newTarget("b", false, 15, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "b": {ptr(10), nil, nil, true}, - "d": {nil, ptr(10), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 3" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", true, 10, 10, 10, 8, 10, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 10), - newTarget("a", true, 5, 15, 5, 5, 5, 10), - newTarget("b", false, 10, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "d": {nil, ptr(1), ptr(0), false}, - "b": {nil, ptr(10), ptr(0), false}, - "f": {nil, ptr(5), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 4" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 4, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 10), - newTarget("c", true, 0, 5, 0, 0, 5, 10), - newTarget("a", true, 5, 15, 5, 5, 5, 10), - newTarget("b", true, 10, 10, 10, 8, 10, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(15), nil, nil, false}, - "c": {ptr(5), nil, nil, false}, - "b": {nil, ptr(1), ptr(0), false}, - "d": {nil, nil, nil, false}, - "f": {nil, ptr(1), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 5" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 5, 10), - newTarget("d", true, 10, 10, 10, 10, 5, 10), - newTarget("c", true, 5, 5, 5, 2, 5, 10), - newTarget("a", true, 15, 15, 15, 15, 5, 10), - newTarget("b", true, 10, 10, 10, 10, 5, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "e": {ptr(0), ptr(0), ptr(5), false}, - "a": {nil, nil, nil, false}, - "b": {nil, nil, nil, false}, - "c": {nil, nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = 
"round 6" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 5, 10), - newTarget("d", true, 10, 10, 10, 10, 5, 10), - newTarget("c", true, 5, 5, 5, 4, 5, 10), - newTarget("a", true, 15, 15, 15, 15, 5, 10), - newTarget("b", true, 10, 10, 10, 10, 5, 10), - } - s.Plans = RolloutPlans{ - "a": {nil, nil, nil, false}, - "b": {nil, nil, nil, false}, - "c": {nil, nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{Targets: test.Targets, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable, Replicas: 45} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", test.Name, got, test.Plans) - } - }) - } -} - -func TestPlanWholeProcessWithSurge(t *testing.T) { - var replicas, maxSurge, maxUnavailable int32 = 45, 5, 0 - var tests []PlanTestSuit - - s := PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 1" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", false, 10, 10, 0, 0, 0, 0), - newTarget("c", false, 0, 5, 0, 0, 0, 0), - newTarget("a", false, 5, 15, 0, 0, 0, 0), - newTarget("b", false, 20, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), ptr(5), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 2" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", false, 10, 10, 0, 0, 0, 0), - newTarget("c", false, 0, 5, 0, 0, 0, 0), - newTarget("a", true, 5, 15, 5, 4, 5, 0), - newTarget("b", false, 20, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "d": {nil, ptr(5), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 3" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", true, 10, 10, 5, 4, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 0), - newTarget("a", true, 5, 15, 5, 5, 5, 0), - newTarget("b", false, 20, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "d": {nil, ptr(5), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 4" - s.Targets = Targets{ - newTarget("f", false, 5, 5, 0, 0, 0, 0), - newTarget("d", true, 10, 10, 10, 7, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 0), - newTarget("a", true, 5, 15, 5, 5, 5, 0), - newTarget("b", false, 20, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(5), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, ptr(5), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 5" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 2, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 0), - newTarget("a", true, 5, 15, 5, 5, 5, 0), - newTarget("b", false, 20, 10, 0, 0, 0, 0), - newTarget("e", 
false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(10), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 6" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 0), - newTarget("a", true, 10, 15, 10, 8, 5, 0), - newTarget("b", false, 20, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(10), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "b": {ptr(17), nil, nil, true}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 7" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 0), - newTarget("a", true, 10, 15, 10, 10, 0, 1), - newTarget("b", false, 17, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(13), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "b": {ptr(15), nil, nil, true}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 8" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 0), - newTarget("a", true, 13, 15, 13, 13, 5, 0), - newTarget("b", false, 15, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {ptr(15), nil, nil, false}, - "c": {ptr(0), nil, nil, false}, - "b": {ptr(12), nil, nil, true}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 9" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 0), - newTarget("c", true, 0, 5, 0, 0, 5, 0), - newTarget("a", true, 15, 15, 15, 15, 5, 0), - newTarget("b", false, 12, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "c": {ptr(3), nil, nil, false}, - "b": {ptr(10), nil, nil, true}, - "a": {nil, nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 10" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 0), - newTarget("c", true, 3, 5, 3, 3, 5, 0), - newTarget("a", true, 15, 15, 15, 15, 5, 0), - newTarget("b", false, 10, 10, 0, 0, 0, 0), - newTarget("e", false, 5, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "c": {ptr(3), nil, nil, false}, - "e": {ptr(2), nil, nil, true}, - "a": {nil, nil, nil, false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - "b": {nil, ptr(2), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 11" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 
5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 0), - newTarget("c", true, 3, 5, 3, 3, 5, 0), - newTarget("a", true, 15, 15, 15, 15, 5, 0), - newTarget("b", true, 10, 10, 2, 0, 2, 0), - newTarget("e", false, 2, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "c": {ptr(3), nil, nil, false}, - "a": {nil, nil, nil, false}, - "b": {nil, ptr(5), ptr(0), false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 12" - s.Targets = Targets{ - newTarget("f", true, 5, 5, 5, 5, 5, 0), - newTarget("d", true, 10, 10, 10, 10, 5, 0), - newTarget("c", true, 3, 5, 3, 3, 5, 0), - newTarget("a", true, 15, 15, 15, 15, 5, 0), - newTarget("b", true, 10, 10, 7, 7, 5, 0), - newTarget("e", false, 2, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "c": {ptr(5), nil, nil, false}, - "a": {nil, nil, nil, false}, - "b": {nil, ptr(3), ptr(0), false}, - "d": {nil, nil, nil, false}, - "f": {nil, nil, nil, false}, - } - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{Targets: test.Targets, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable, Replicas: 45} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", test.Name, got, test.Plans) - } - }) - } -} - -func TestPlanCreation(t *testing.T) { - var replicas, maxSurge, maxUnavailable int32 = 15, 10, 20 - var tests []PlanTestSuit - - s := PlanTestSuit{Replicas: replicas, MaxSurge: maxSurge, MaxUnavailable: maxUnavailable} - s.Name = "round 1" - s.Targets = Targets{ - newTarget("b", false, 0, 5, 0, 0, 0, 0), - newTarget("a", false, 0, 10, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {Replicas: nil}, - "b": {Replicas: nil}, - } - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{Targets: test.Targets, MaxSurge: test.MaxSurge, MaxUnavailable: test.MaxUnavailable} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", test.Name, got, test.Plans) - } - }) - } -} - -func TestPlanScale(t *testing.T) { - var tests []PlanTestSuit - - s := PlanTestSuit{Replicas: 4, MaxSurge: 0, MaxUnavailable: 1} - s.Name = "scale out with creation" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 4, 4, 4, 4, 4, 4, 0, 1), - newTargetWithActualInfo("c", true, 0, 2, 0, 0, 0, 0, 0, 1), - newTargetWithActualInfo("k", true, 0, 1, 0, 0, 0, 0, 0, 0), - } - s.Plans = RolloutPlans{ - "c": {nil, nil, nil, false}, - "k": {nil, nil, nil, false}, - "b": {nil, nil, nil, false}, - } - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{Targets: test.Targets, MaxSurge: test.MaxSurge, MaxUnavailable: test.MaxUnavailable, Replicas: test.Replicas} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", test.Name, got, test.Plans) - } - }) - } -} - -func TestPlanEmptyTargets(t *testing.T) { - var tests []PlanTestSuit - - s := PlanTestSuit{} - s.Name = "empty targets" - s.Plans = RolloutPlans{} - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{MaxSurge: 0, MaxUnavailable: 25, Replicas: 100} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", 
test.Name, got, test.Plans) - } - }) - } - - s = PlanTestSuit{} - s.Name = "empty maxSurge & maxUnavailable" - s.Plans = RolloutPlans{} - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{MaxSurge: 0, MaxUnavailable: 0} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", test.Name, got, test.Plans) - } - }) - } -} - -func TestPlanUnexceptedCases(t *testing.T) { - var tests []PlanTestSuit - - s := PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "too many unavailable" - s.Targets = Targets{ - newTargetWithActualInfo("a", false, 200, 200, 0, 0, 200, 200, 0, 1), - newTargetWithActualInfo("b", false, 300, 300, 0, 0, 300, 100, 0, 1), - } - s.Plans = RolloutPlans{ - "b": {nil, ptr(0), ptr(1), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 700, MaxSurge: 0, MaxUnavailable: 35} - s.Name = "too many unavailable during updating" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 400, 100, 4, 4, 400, 360, 0, 35), - newTargetWithActualInfo("c", false, 300, 600, 0, 0, 300, 300, 0, 15), - } - s.Plans = RolloutPlans{ - "b": {ptr(400), ptr(0), ptr(35), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 600, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "too many unavailable due to cluster not ready" - s.Targets = Targets{ - newTargetWithActualInfo("a", false, 200, 200, 0, 0, 200, 200, 0, 1), - newTargetWithActualInfo("b", false, 300, 300, 0, 0, 300, 300, 0, 1), - } - s.Plans = RolloutPlans{} - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "a few unavailable due to cluster not ready" - s.Targets = Targets{ - newTargetWithActualInfo("a", false, 200, 200, 0, 0, 200, 200, 0, 1), - newTargetWithActualInfo("b", false, 300, 300, 0, 0, 300, 300, 0, 1), - } - s.Plans = RolloutPlans{ - "a": {nil, ptr(10), ptr(15), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "missing too many replicas" - s.Targets = Targets{ - newTargetWithActualInfo("a", false, 200, 200, 0, 0, 200, 200, 0, 1), - newTargetWithActualInfo("b", false, 300, 300, 0, 0, 200, 200, 0, 1), - } - s.Plans = RolloutPlans{ - "b": {nil, ptr(0), ptr(1), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "missing a few replicas" - s.Targets = Targets{ - newTargetWithActualInfo("a", false, 200, 200, 0, 0, 199, 195, 0, 1), - newTargetWithActualInfo("b", false, 300, 300, 0, 0, 298, 295, 0, 1), - } - s.Plans = RolloutPlans{ - "a": {nil, ptr(0), ptr(20), false}, - "b": {nil, ptr(0), ptr(5), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "too many replicas actually" - s.Targets = Targets{ - newTargetWithActualInfo("a", false, 200, 200, 0, 0, 300, 250, 0, 1), - newTargetWithActualInfo("b", false, 300, 300, 0, 0, 300, 300, 0, 1), - } - s.Plans = RolloutPlans{ - "a": {nil, ptr(0), ptr(25), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 700, MaxSurge: 0, MaxUnavailable: 35} - s.Name = "too many replicas actually while available still less than spec replicas" - s.Targets = Targets{ - newTargetWithActualInfo("a", false, 594, 300, 0, 0, 598, 598, 0, 1), - newTargetWithActualInfo("b", true, 102, 400, 100, 71, 100, 71, 0, 5), - } - s.Plans = RolloutPlans{ - "b": {ptr(102), ptr(0), 
ptr(31), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "unavailable in status lags during updating" - s.Targets = Targets{ - newTargetWithActualInfo("a", true, 200, 200, 0, 0, 200, 200, 0, 25), - newTargetWithActualInfo("b", false, 300, 300, 0, 0, 300, 300, 0, 10), - } - s.Plans = RolloutPlans{ - "a": {nil, ptr(0), ptr(25), false}, - } - tests = append(tests, s) - - // todo: it would be better if we can scale in "a" during it's updating - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "unavailable in status lags during updating 1" - s.Targets = Targets{ - newTargetWithActualInfo("a", true, 200, 100, 0, 0, 200, 200, 0, 25), - newTargetWithActualInfo("b", false, 300, 400, 0, 0, 300, 300, 0, 10), - } - s.Plans = RolloutPlans{ - "a": {ptr(200), ptr(0), ptr(25), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 50} - s.Name = "unavailable in status lags during updating 2" - s.Targets = Targets{ - newTargetWithAllInfo("a", true, 200, 100, 0, 0, 0, 0, 200, 200, 0, 25), - newTargetWithActualInfo("b", false, 300, 400, 0, 0, 300, 300, 0, 10), - } - s.Plans = RolloutPlans{ - "b": {ptr(300), ptr(0), ptr(25), false}, - "a": {ptr(200), ptr(0), ptr(25), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 50} - s.Name = "unavailable in status lags during updating 3" - s.Targets = Targets{ - newTargetWithActualInfo("a", true, 175, 100, 25, 25, 175, 174, 0, 1), - newTargetWithActualInfo("b", true, 300, 400, 25, 25, 300, 300, 0, 25), - } - s.Plans = RolloutPlans{ - "b": {ptr(300), ptr(25), ptr(24), false}, - "a": {ptr(175), ptr(0), ptr(1), false}, - } - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{Targets: test.Targets, MaxSurge: test.MaxSurge, MaxUnavailable: test.MaxUnavailable, Replicas: test.Replicas} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", test.Name, got, test.Plans) - } - }) - } -} - -func TestPlanActualCases(t *testing.T) { - var tests []PlanTestSuit - - s := PlanTestSuit{Replicas: 420, MaxSurge: 0, MaxUnavailable: 21} - s.Name = "I0721 13:28:24.347552" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 120, 120, 120, 120, 120, 120, 0, 6), - newTargetWithActualInfo("c", true, 380, 300, 380, 292, 380, 292, 0, 19), - } - s.Plans = RolloutPlans{ - "b": {nil, nil, nil, false}, - "c": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 13:33:21.582337" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 120, 220, 120, 120, 120, 120, 0, 6), - newTargetWithActualInfo("c", true, 290, 290, 290, 290, 290, 290, 0, 14), - } - s.Plans = RolloutPlans{ - "c": {nil, nil, nil, false}, - "b": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 13:33:26.799923" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 220, 220, 120, 220, 120, 0, 11), - newTargetWithActualInfo("c", true, 290, 290, 290, 290, 290, 290, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {nil, nil, nil, false}, - "c": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:08:04.842031" - s.Targets = Targets{ 
- newTargetWithActualInfo("b", false, 220, 220, 0, 0, 220, 220, 0, 11), - newTargetWithActualInfo("c", false, 290, 290, 0, 0, 290, 290, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {nil, ptr(0), ptr(25), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:08:20.459339" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 220, 43, 22, 216, 195, 0, 25), - newTargetWithActualInfo("c", false, 290, 290, 0, 0, 290, 290, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {nil, ptr(0), ptr(25), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:09:42.009571" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 220, 202, 182, 216, 196, 0, 25), - newTargetWithActualInfo("c", false, 290, 290, 0, 0, 290, 290, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {nil, ptr(0), ptr(25), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:09:52.491311" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 220, 220, 203, 220, 202, 0, 25), - newTargetWithActualInfo("c", false, 290, 290, 0, 0, 290, 290, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {nil, nil, nil, false}, - "c": {nil, ptr(0), ptr(7), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:10:02.860697" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 220, 220, 220, 220, 220, 0, 25), - newTargetWithActualInfo("c", true, 290, 290, 16, 0, 290, 274, 0, 25), - } - s.Plans = RolloutPlans{ - "b": {nil, nil, nil, false}, - "c": {nil, ptr(0), ptr(25), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:27:19.667982" - s.Targets = Targets{ - newTargetWithActualInfo("b", false, 220, 320, 0, 0, 220, 220, 0, 11), - newTargetWithActualInfo("c", false, 290, 190, 0, 0, 290, 281, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {ptr(220), ptr(0), ptr(16), false}, - "c": {ptr(281), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:27:25.862175" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 320, 16, 5, 220, 205, 0, 16), - newTargetWithActualInfo("c", false, 290, 190, 0, 0, 290, 281, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {ptr(220), ptr(0), ptr(16), false}, - "c": {ptr(281), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:28:26.178728" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 320, 121, 110, 218, 204, 0, 16), - newTargetWithAllInfo("c", false, 290, 190, 0, 0, 290, 282, 290, 282, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {ptr(220), ptr(0), ptr(17), false}, - "c": {ptr(282), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:29:32.612121" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 320, 220, 217, 220, 217, 0, 18), - newTargetWithActualInfo("c", false, 290, 190, 0, 0, 290, 283, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {ptr(220), nil, nil, false}, - "c": {ptr(268), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:29:43.095244" - 
s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 320, 220, 220, 220, 220, 0, 11), - newTargetWithActualInfo("c", false, 268, 190, 0, 0, 268, 268, 0, 13), - } - s.Plans = RolloutPlans{ - "b": {ptr(242), nil, nil, false}, - "c": {ptr(265), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:29:53.385275" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 242, 320, 242, 242, 242, 242, 0, 12), - newTargetWithActualInfo("c", false, 263, 190, 0, 0, 263, 263, 0, 13), - } - s.Plans = RolloutPlans{ - "b": {ptr(247), nil, nil, false}, - "c": {ptr(243), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:29:53.799261" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 247, 320, 242, 242, 242, 242, 0, 12), - newTargetWithActualInfo("c", false, 243, 190, 0, 0, 243, 243, 0, 12), - } - s.Plans = RolloutPlans{ - "b": {ptr(262), ptr(5), ptr(5), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:30:35.912725" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 297, 320, 297, 297, 297, 297, 0, 14), - newTargetWithActualInfo("c", false, 203, 190, 0, 0, 203, 203, 0, 10), - } - s.Plans = RolloutPlans{ - "b": {ptr(307), nil, nil, false}, - "c": {ptr(190), ptr(0), ptr(2), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:30:36.162431" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 307, 320, 297, 297, 297, 297, 0, 15), - newTargetWithAllInfo("c", true, 190, 190, 0, 0, 203, 203, 203, 203, 0, 2), - } - s.Plans = RolloutPlans{ - "b": {ptr(307), ptr(0), ptr(10), false}, - "c": {nil, ptr(13), ptr(2), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:30:46.793575" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 307, 320, 307, 307, 307, 307, 0, 15), - newTargetWithActualInfo("c", true, 190, 190, 19, 4, 203, 188, 13, 12), - } - s.Plans = RolloutPlans{ - "b": {ptr(307), nil, nil, false}, - "c": {nil, ptr(13), ptr(12), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 14:33:31.262478" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 307, 320, 307, 307, 307, 307, 0, 15), - newTargetWithActualInfo("c", true, 190, 190, 190, 173, 195, 178, 13, 12), - } - s.Plans = RolloutPlans{ - "b": {nil, nil, nil, false}, - "c": {nil, nil, nil, false}, - } - tests = append(tests, s) - - // the result plan is judged as invalid for now so an empty plan will be returned. - // remove this test case if the validation rule changed. 
Refer to the comments in validatePlans for more information - s = PlanTestSuit{Replicas: 610, MaxSurge: 0, MaxUnavailable: 30} - s.Name = "I0721 15:36:50.064432" - s.Targets = Targets{ - newTargetWithActualInfo("b", false, 320, 320, 0, 0, 320, 320, 0, 16), - newTargetWithActualInfo("c", false, 190, 290, 0, 0, 190, 190, 0, 9), - } - s.Plans = RolloutPlans{ - "c": {ptr(190), ptr(100), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 16:01:56.842312" - s.Targets = Targets{ - newTargetWithActualInfo("b", false, 310, 320, 0, 0, 310, 310, 0, 15), - newTargetWithActualInfo("c", true, 190, 190, 190, 182, 190, 182, 0, 9), - } - s.Plans = RolloutPlans{ - "b": {ptr(310), ptr(10), ptr(7), false}, - "c": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 255, MaxSurge: 0, MaxUnavailable: 2} - s.Name = "I0721 11:10:26.009785" - s.Targets = Targets{ - newTargetWithActualInfo("a", true, 99, 99, 4, 2, 101, 99, 0, 2), - newTargetWithActualInfo("b", true, 16, 16, 16, 16, 16, 16, 0, 1), - newTargetWithActualInfo("c", false, 40, 40, 0, 0, 40, 40, 0, 0), - newTargetWithActualInfo("d", false, 29, 29, 0, 0, 29, 29, 0, 0), - newTargetWithActualInfo("e", false, 30, 30, 0, 0, 30, 30, 0, 0), - newTargetWithActualInfo("f", false, 16, 16, 0, 0, 16, 16, 0, 0), - newTargetWithActualInfo("g", false, 10, 10, 0, 0, 10, 10, 0, 0), - newTargetWithActualInfo("h", false, 15, 15, 0, 0, 15, 15, 0, 0), - } - s.Plans = RolloutPlans{ - "a": {nil, ptr(0), ptr(2), false}, - "b": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 16:45:31.681757" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 320, 220, 320, 320, 320, 320, 0, 16), - newTargetWithActualInfo("c", true, 190, 290, 190, 190, 190, 190, 0, 9), - } - s.Plans = RolloutPlans{ - "c": {ptr(190), nil, nil, false}, - "b": {ptr(295), nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 16:45:36.682072" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 295, 220, 295, 295, 295, 295, 0, 14), - newTargetWithActualInfo("c", true, 190, 290, 190, 190, 190, 190, 0, 9), - } - s.Plans = RolloutPlans{ - "c": {ptr(215), nil, nil, false}, - "b": {ptr(295), nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 16:46:50.975936" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 220, 243, 243, 243, 243, 0, 11), - newTargetWithActualInfo("c", true, 267, 290, 267, 267, 267, 267, 0, 13), - } - s.Plans = RolloutPlans{ - "b": {nil, nil, nil, false}, - "c": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 17:02:19.762340" - s.Targets = Targets{ - newTargetWithActualInfo("b", false, 220, 320, 0, 0, 220, 220, 0, 11), - newTargetWithActualInfo("c", false, 290, 190, 0, 0, 290, 275, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {ptr(220), ptr(0), ptr(10), false}, - "c": {ptr(275), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 17:04:36.844995" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 320, 220, 214, 220, 214, 0, 10), - newTargetWithActualInfo("c", false, 290, 190, 0, 0, 290, 275, 0, 14), - } - s.Plans = 
RolloutPlans{ - "b": {ptr(220), nil, nil, false}, - "c": {ptr(271), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 410, MaxSurge: 0, MaxUnavailable: 20} - s.Name = "I0721 20:03:08.508247" - s.Targets = Targets{ - newTargetWithActualInfo("b", false, 193, 120, 0, 0, 193, 193, 0, 9), - newTargetWithActualInfo("c", false, 317, 290, 0, 0, 317, 292, 0, 15), - } - s.Plans = RolloutPlans{ - "b": {ptr(120), ptr(0), ptr(20), false}, - "c": {ptr(290), nil, nil, true}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 20:04:47.826333" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 120, 220, 120, 120, 120, 120, 0, 6), - newTargetWithActualInfo("c", true, 290, 290, 23, 4, 290, 271, 0, 20), - } - s.Plans = RolloutPlans{ - "b": {ptr(120), nil, nil, false}, - "c": {nil, ptr(100), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 20:08:48.439058" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 120, 220, 120, 120, 120, 120, 0, 6), - newTargetWithActualInfo("c", true, 290, 290, 265, 245, 290, 270, 0, 20), - } - s.Plans = RolloutPlans{ - "b": {ptr(195), nil, nil, false}, - "c": {nil, ptr(25), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0721 22:00:34.777064" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 220, 320, 220, 217, 220, 215, 0, 11), - newTargetWithActualInfo("c", true, 290, 190, 290, 279, 290, 279, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {ptr(220), nil, nil, false}, - "c": {ptr(270), nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0722 10:58:38.428687" - s.Targets = Targets{ - newTargetWithActualInfo("b", false, 120, 220, 0, 0, 120, 120, 0, 6), - newTargetWithActualInfo("c", false, 190, 290, 0, 0, 190, 190, 0, 9), - } - s.Plans = RolloutPlans{ - "b": {ptr(120), ptr(120), ptr(0), false}, - "c": {ptr(190), ptr(80), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0722 10:59:21.853509" - s.Targets = Targets{ - newTargetWithActualInfo("b", true, 120, 220, 120, 120, 120, 120, 120, 0), - newTargetWithActualInfo("c", true, 190, 290, 96, 0, 286, 190, 94, 0), - } - s.Plans = RolloutPlans{ - "b": {ptr(220), nil, nil, false}, - "c": {ptr(194), ptr(94), ptr(0), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 510, MaxSurge: 0, MaxUnavailable: 25} - s.Name = "I0722 11:43:31.110287" - s.Targets = Targets{ - newTargetWithActualInfo("b", false, 220, 420, 0, 0, 220, 220, 0, 11), - newTargetWithActualInfo("c", false, 290, 90, 0, 0, 290, 281, 0, 14), - } - s.Plans = RolloutPlans{ - "b": {ptr(220), ptr(0), ptr(16), false}, - "c": {ptr(281), nil, nil, true}, - } - tests = append(tests, s) - - // c is actually during updating, it's paused during updating from a->b. 
While b is paused - // during updating from b->c - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 50} - s.Name = "I0726 13:08:28.299562" - s.Targets = Targets{ - newTargetWithAllInfo("b", false, 350, 350, 0, 0, 320, 320, 350, 350, 0, 30), - newTargetWithAllInfo("c", false, 150, 150, 0, 0, 0, 0, 150, 150, 0, 20), - } - s.Plans = RolloutPlans{ - "b": {nil, ptr(0), ptr(30), false}, - "c": {nil, ptr(0), ptr(20), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 50} - s.Name = "I0726 17:34:07.505088" - s.Targets = Targets{ - newTargetWithActualInfo("b", false, 261, 320, 0, 0, 261, 261, 0, 26), - newTargetWithActualInfo("c", true, 239, 180, 239, 189, 239, 189, 0, 23), - } - s.Plans = RolloutPlans{ - "c": {ptr(189), nil, nil, false}, - } - tests = append(tests, s) - - // it would be better if we scale in c during it's updating - s = PlanTestSuit{Replicas: 500, MaxSurge: 0, MaxUnavailable: 50} - s.Name = "I0726 19:32:32.905839" - s.Targets = Targets{ - newTargetWithAllInfo("b", false, 320, 220, 0, 0, 235, 235, 320, 282, 0, 43), - newTargetWithActualInfo("c", false, 180, 280, 0, 0, 180, 173, 0, 18), - } - s.Plans = RolloutPlans{ - "b": {ptr(320), ptr(0), ptr(43), false}, - "c": {ptr(180), ptr(0), ptr(7), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 25, MaxSurge: 0, MaxUnavailable: 6} - s.Name = "I0729 15:55:04.138529" - s.Targets = Targets{ - newTargetWithAllInfo("a", false, 8, 8, 0, 0, 8, 1, 8, 1, 0, 2), - newTargetWithAllInfo("b", false, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1), - newTargetWithAllInfo("c", false, 4, 4, 0, 0, 4, 0, 4, 0, 0, 1), - newTargetWithAllInfo("e", false, 4, 4, 0, 0, 4, 0, 4, 0, 0, 1), - newTargetWithAllInfo("f", false, 4, 4, 0, 0, 4, 2, 4, 2, 0, 1), - newTargetWithAllInfo("h", false, 4, 4, 0, 0, 4, 1, 4, 1, 0, 1), - } - s.Plans = RolloutPlans{ - "a": {nil, ptr(0), ptr(1), false}, - "b": {nil, ptr(0), ptr(1), false}, - "c": {nil, ptr(0), ptr(1), false}, - "e": {nil, ptr(0), ptr(1), false}, - "f": {nil, ptr(0), ptr(1), false}, - "h": {nil, ptr(0), ptr(1), false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 10, MaxSurge: 0, MaxUnavailable: 2} - s.Name = "I0801 23:42:36.557596" - s.Targets = Targets{ - newTargetWithAllInfo("a", false, 3, 0, 0, 0, 5, 5, 5, 5, 0, 1), - newTargetWithAllInfo("c", false, 2, 0, 0, 0, 2, 2, 2, 2, 0, 1), - newTargetWithAllInfo("d", true, 0, 10, 0, 0, 0, 0, 0, 0, 0, 1), - newTargetWithAllInfo("e", false, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1), - newTargetWithAllInfo("f", false, 2, 0, 0, 0, 2, 2, 2, 2, 0, 1), - } - s.Plans = RolloutPlans{ - "d": {ptr(0), nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 8, MaxSurge: 0, MaxUnavailable: 2} - s.Name = "I0801 18:08:25.282496" - s.Targets = Targets{ - newTargetWithAllInfo("a", false, 6, 1, 0, 0, 8, 8, 8, 8, 0, 1), - newTargetWithAllInfo("d", true, 0, 7, 0, 0, 0, 0, 0, 0, 0, 1), - } - s.Plans = RolloutPlans{ - "d": {ptr(0), nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 50, MaxSurge: 5, MaxUnavailable: 0} - s.Name = "I0823 11:46:19.897608" - s.Targets = Targets{ - newTargetWithAllInfo("a", true, 19, 19, 19, 19, 19, 19, 19, 19, 2, 0), - newTargetWithAllInfo("b", true, 8, 8, 6, 2, 6, 2, 10, 8, 4, 0), - newTargetWithAllInfo("d", true, 5, 5, 5, 5, 5, 5, 5, 5, 1, 0), - newTargetWithAllInfo("e", true, 7, 7, 7, 6, 7, 6, 8, 7, 1, 0), - newTargetWithAllInfo("f", false, 7, 7, 0, 0, 7, 7, 7, 7, 1, 0), - newTargetWithAllInfo("h", true, 4, 4, 4, 4, 4, 4, 4, 4, 1, 0), - } 
- s.Plans = RolloutPlans{ - "a": {nil, nil, nil, false}, - "b": {nil, ptr(2), ptr(0), false}, - "d": {nil, nil, nil, false}, - "e": {nil, nil, nil, false}, - "h": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 50, MaxSurge: 5, MaxUnavailable: 0} - s.Name = "I0823 11:46:24.931388" - s.Targets = Targets{ - newTargetWithAllInfo("a", true, 19, 19, 19, 19, 19, 19, 19, 19, 2, 0), - newTargetWithAllInfo("b", true, 8, 8, 6, 2, 6, 2, 12, 8, 4, 0), - newTargetWithAllInfo("d", true, 5, 5, 5, 5, 5, 5, 5, 5, 1, 0), - newTargetWithAllInfo("e", true, 7, 7, 7, 6, 7, 6, 8, 7, 1, 0), - newTargetWithAllInfo("f", true, 7, 7, 2, 0, 2, 0, 9, 7, 2, 0), - newTargetWithAllInfo("h", true, 4, 4, 4, 4, 4, 4, 4, 4, 1, 0), - } - s.Plans = RolloutPlans{ - "a": {nil, nil, nil, false}, - "b": {nil, ptr(2), ptr(0), false}, - "d": {nil, nil, nil, false}, - "e": {nil, nil, nil, false}, - "f": {nil, ptr(1), ptr(0), false}, - "h": {nil, nil, nil, false}, - } - tests = append(tests, s) - - s = PlanTestSuit{Replicas: 2, MaxSurge: 0, MaxUnavailable: 1} - s.Name = "I0819 14:27:50.120900" - s.Targets = Targets{ - newTargetWithAllInfo("a", true, 1, 1, 1, 0, 1, 0, 2, 1, 1, 0), - newTargetWithAllInfo("e", true, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1), - } - s.Plans = RolloutPlans{ - "a": {nil, ptr(1), ptr(0), false}, - "e": {ptr(0), nil, nil, false}, - } - tests = append(tests, s) - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - planner := &RolloutPlanner{Targets: test.Targets, MaxSurge: test.MaxSurge, MaxUnavailable: test.MaxUnavailable, Replicas: test.Replicas} - got := planner.Plan() - if !reflect.DeepEqual(got, test.Plans) { - t.Errorf("%s: got: %v, expected: %v", test.Name, got, test.Plans) - } - }) - } -} diff --git a/pkg/controllers/util/adoptedannotation.go b/pkg/util/adoption/adopted.go similarity index 70% rename from pkg/controllers/util/adoptedannotation.go rename to pkg/util/adoption/adopted.go index 63060b74..373890ad 100644 --- a/pkg/controllers/util/adoptedannotation.go +++ b/pkg/util/adoption/adopted.go @@ -14,17 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. 
 */
 
-package util
+package adoption
 
 import (
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
 )
 
 var AdoptedAnnotation = common.DefaultPrefix + "adopted"
 
-func HasAdoptedAnnotation(obj *unstructured.Unstructured) bool {
+func HasAdoptedAnnotation(obj metav1.Object) bool {
 	annotations := obj.GetAnnotations()
 	if annotations == nil {
 		return false
@@ -32,7 +32,23 @@ func HasAdoptedAnnotation(obj *unstructured.Unstructured) bool {
 	return annotations[AdoptedAnnotation] == common.AnnotationValueTrue
 }
 
-func RemoveAdoptedAnnotation(obj *unstructured.Unstructured) {
+func AddAdoptedAnnotation(obj metav1.Object) bool {
+	annotations := obj.GetAnnotations()
+	if annotations[AdoptedAnnotation] == common.AnnotationValueTrue {
+		return false
+	}
+
+	// guard against objects with no annotations: writing to a nil map panics
+	if annotations == nil {
+		annotations = map[string]string{}
+	}
+
+	annotations[AdoptedAnnotation] = common.AnnotationValueTrue
+	obj.SetAnnotations(annotations)
+	return true
+}
+
+func RemoveAdoptedAnnotation(obj metav1.Object) {
 	annotations := obj.GetAnnotations()
 	if annotations == nil || annotations[AdoptedAnnotation] != common.AnnotationValueTrue {
 		return
diff --git a/pkg/controllers/util/conflictresolutionannotation.go b/pkg/util/adoption/conflictresolution.go
similarity index 89%
rename from pkg/controllers/util/conflictresolutionannotation.go
rename to pkg/util/adoption/conflictresolution.go
index e726ac5e..f8c08eb5 100644
--- a/pkg/controllers/util/conflictresolutionannotation.go
+++ b/pkg/util/adoption/conflictresolution.go
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package util
+package adoption
 
 import (
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
 )
 
@@ -34,7 +34,7 @@ const (
 	ConflictResolutionAdopt ConflictResolution = "adopt"
 )
 
-func ShouldAdoptPreexistingResources(obj *unstructured.Unstructured) bool {
+func ShouldAdoptPreexistingResources(obj metav1.Object) bool {
 	annotations := obj.GetAnnotations()
 
 	value, exists := annotations[ConflictResolutionInternalAnnotation]
diff --git a/pkg/util/fedobjectadapters/adapters.go b/pkg/util/fedobjectadapters/adapters.go
index b30f52ff..7741a7d7 100644
--- a/pkg/util/fedobjectadapters/adapters.go
+++ b/pkg/util/fedobjectadapters/adapters.go
@@ -34,6 +34,23 @@ func GetFromLister(
 	}
 }
 
+func Get(
+	ctx context.Context,
+	fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface,
+	namespace, name string,
+	opts metav1.GetOptions,
+) (fedcorev1a1.GenericFederatedObject, error) {
+	if namespace == "" {
+		return ensureNilInterface(
+			fedv1a1Client.ClusterFederatedObjects().Get(ctx, name, opts),
+		)
+	} else {
+		return ensureNilInterface(
+			fedv1a1Client.FederatedObjects(namespace).Get(ctx, name, opts),
+		)
+	}
+}
+
 func Create(
 	ctx context.Context,
 	fedv1a1Client fedcorev1a1client.CoreV1alpha1Interface,
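fedobjectadapters.Get gives callers a single entry point for both scopes: an empty namespace selects the cluster-scoped ClusterFederatedObjects client, anything else the namespaced FederatedObjects client. A hypothetical call site (import paths inferred from this patch series; the function itself is illustrative, not part of the diff):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1"
	"github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters"
)

// getFederatedObject resolves a federated object by namespace/name without the
// caller having to care whether it is cluster-scoped or namespaced.
func getFederatedObject(
	ctx context.Context,
	client fedcorev1a1client.CoreV1alpha1Interface,
	namespace, name string,
) (fedcorev1a1.GenericFederatedObject, error) {
	// For cluster-scoped federated objects, pass namespace == "".
	return fedobjectadapters.Get(ctx, client, namespace, name, metav1.GetOptions{})
}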
m.GetFederatedClusterLister().List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("failed to list clusters: %w", err) + } + for _, cluster := range allClusters { + if clusterutil.IsClusterJoined(&cluster.Status) { + clusters = append(clusters, cluster) + } + } + + return clusters, nil +} + func (m *federatedInformerManager) GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister { return m.ftcInformer.Lister() } diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index 5ea8b2a6..185e8b70 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -54,6 +54,9 @@ type FederatedTypeConfigManager interface { // Returns the FederatedTypeConfig lister used by the manager. GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister + // Adds a FTCUpdateHandler that is called each time the InformerManager finishes processing an FTC. + AddFTCUpdateHandler(handler FTCUpdateHandler) error + // Returns true if the manager's view of FederatedTypeConfigs is synced. HasSynced() bool } @@ -65,21 +68,14 @@ type FederatedTypeConfigManager interface { // Having multiple FTCs with the same source type is not supported and may cause InformerManager to behave incorrectly. // Updating FTC source types is also not supported and may also cause InformerManager to behave incorrectly. type InformerManager interface { + FederatedTypeConfigManager + // Adds an EventHandler used to generate and register ResourceEventHandlers for each FTC's source type informer. AddEventHandlerGenerator(generator *EventHandlerGenerator) error - // Adds a FTCUpdateHandler that is called each time the InformerManager finishes processing an FTC. - AddFTCUpdateHandler(handler FTCUpdateHandler) error // Returns a lister for the given GroupResourceVersion if it exists. The lister for each FTC's source type will // eventually exist. GetResourceLister(gvk schema.GroupVersionKind) (lister cache.GenericLister, informerSynced cache.InformerSynced, exists bool) - // Returns the known FTC mapping for the given GVK if it exists. - GetResourceFTC(gvk schema.GroupVersionKind) (ftc *fedcorev1a1.FederatedTypeConfig, exists bool) - - // Returns the FederatedTypeConfig lister used by the InformerManager. - GetFederatedTypeConfigLister() fedcorev1a1listers.FederatedTypeConfigLister - // Returns true if the InformerManager's view of FederatedTypeConfigs is synced. - HasSynced() bool // Starts processing FederatedTypeConfig events. Start(ctx context.Context) @@ -134,6 +130,8 @@ type FederatedInformerManager interface { GetFederatedClusterLister() fedcorev1a1listers.FederatedClusterLister // Returns the joined clusters in ready status listed from the FederatedInformerManager. GetReadyClusters() ([]*fedcorev1a1.FederatedCluster, error) + // Returns the joined clusters listed from the FederatedInformerManager. + GetJoinedClusters() ([]*fedcorev1a1.FederatedCluster, error) // Returns true if the FederatedInformerManager's view of FederatedTypeConfigs and FederatedClusters is synced. 
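GetJoinedClusters complements the existing GetReadyClusters accessor: a joined cluster may be temporarily unready, and the sync controller still has to account for it when deciding whether member resources need cleanup. A minimal consumer sketch against the interface above; the helper name and error wrapping are illustrative, not part of this patch:

package sketch

import (
	"fmt"

	"github.com/kubewharf/kubeadmiral/pkg/util/informermanager"
)

// joinedButUnreadyCount uses only the two accessors shown above to count
// clusters that are joined but currently fail the readiness check.
func joinedButUnreadyCount(manager informermanager.FederatedInformerManager) (int, error) {
	joined, err := manager.GetJoinedClusters()
	if err != nil {
		return 0, fmt.Errorf("list joined clusters: %w", err)
	}
	ready, err := manager.GetReadyClusters()
	if err != nil {
		return 0, fmt.Errorf("list ready clusters: %w", err)
	}

	readyNames := make(map[string]struct{}, len(ready))
	for _, cluster := range ready {
		readyNames[cluster.Name] = struct{}{}
	}

	count := 0
	for _, cluster := range joined {
		if _, ok := readyNames[cluster.Name]; !ok {
			count++
		}
	}
	return count, nil
}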
HasSynced() bool diff --git a/pkg/controllers/util/orphaningannotation.go b/pkg/util/orphaning/orphaningannotation.go similarity index 92% rename from pkg/controllers/util/orphaningannotation.go rename to pkg/util/orphaning/orphaningannotation.go index 974aa5fd..26c85b67 100644 --- a/pkg/controllers/util/orphaningannotation.go +++ b/pkg/util/orphaning/orphaningannotation.go @@ -18,10 +18,10 @@ This file may have been modified by The KubeAdmiral Authors are Copyright 2023 The KubeAdmiral Authors. */ -package util +package orphaning import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -45,7 +45,7 @@ const ( OrphanManagedResourcesNone OrphanManagedResourcesBehavior = "" ) -func GetOrphaningBehavior(obj *unstructured.Unstructured) OrphanManagedResourcesBehavior { +func GetOrphaningBehavior(obj metav1.Object) OrphanManagedResourcesBehavior { annotations := obj.GetAnnotations() value, exists := annotations[OrphanManagedResourcesInternalAnnotation] From 5b61310e6d74d421d350fc05d3f6896c2a5cca04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Tue, 18 Jul 2023 10:46:22 +0000 Subject: [PATCH 114/173] chore: remove unused fedobj condition reasons --- pkg/apis/core/v1alpha1/types_federatedobject.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/apis/core/v1alpha1/types_federatedobject.go b/pkg/apis/core/v1alpha1/types_federatedobject.go index e4948571..8be8ea96 100644 --- a/pkg/apis/core/v1alpha1/types_federatedobject.go +++ b/pkg/apis/core/v1alpha1/types_federatedobject.go @@ -207,9 +207,7 @@ type FederatedObjectConditionReason string const ( AggregateSuccess FederatedObjectConditionReason = "" - SyncRevisionsFailed FederatedObjectConditionReason = "SyncRevisionsFailed" ClusterRetrievalFailed FederatedObjectConditionReason = "ClusterRetrievalFailed" - PlanRolloutFailed FederatedObjectConditionReason = "PlanRolloutFailed" CheckClusters FederatedObjectConditionReason = "CheckClusters" EnsureDeletionFailed FederatedObjectConditionReason = "EnsureDeletionFailed" ) From 35c7207aabdb5a0fd66c02f5f594e240feaaa9f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Tue, 18 Jul 2023 16:00:50 +0000 Subject: [PATCH 115/173] fix: bugs introduced during sync controller refactor --- pkg/controllers/sync/accessor.go | 2 +- pkg/controllers/sync/controller.go | 19 ++------- pkg/controllers/sync/dispatch/managed.go | 14 +++---- pkg/controllers/sync/resource.go | 20 --------- pkg/controllers/util/propagatedversion.go | 51 ----------------------- 5 files changed, 12 insertions(+), 94 deletions(-) diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index 4507e48f..d9bfcac7 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -155,7 +155,7 @@ func (a *resourceAccessor) FederatedResource( } targetGVK := template.GroupVersionKind() - if targetGVK == corev1.SchemeGroupVersion.WithKind(common.NamespaceKind) && a.isSystemNamespace(qualifiedName.Name) { + if targetGVK == corev1.SchemeGroupVersion.WithKind(common.NamespaceKind) && a.isSystemNamespace(template.GetName()) { return nil, nil } diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index a4881f16..af24e6f1 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -129,21 +129,8 @@ type SyncController struct { /* TODOs -- two 
cluster queues -- cluster event handlers -- federated accessor -- version manager -- fo and cfo -- managed label for federatedinformermanager -- already partially deleted revision history -- already partially deleted fed namespace placement constraint -- use jsonutil for unmarshalling -- check deepcopy, especially when previously used UnmarshalGeneric -- federated name and target name are not the same -- check if namespace are used for all clients - generic client: - version manager (have to experiment with generics, reconsider after refactoring everything else) - - dispatcher */ // NewSyncController returns a new sync controller for the configuration @@ -311,6 +298,7 @@ func (s *SyncController) getClusterClient(clusterName string) (dynamic.Interface // The function triggers reconciliation of all target federated resources. func (s *SyncController) enqueueAllObjects() { + s.logger.V(2).Info("Enqueuing all federated resources") s.fedAccessor.VisitFederatedResources(func(obj fedcorev1a1.GenericFederatedObject) { qualifiedName := common.NewQualifiedName(obj) s.worker.EnqueueWithDelay(qualifiedName, s.reconcileOnClusterChangeDelay) @@ -341,7 +329,7 @@ func (s *SyncController) reconcile(ctx context.Context, federatedName common.Qua ctx, keyedLogger = logging.InjectLoggerValues( ctx, "target-name", fedResource.TargetName().String(), - "target-gvk", fedResource.TargetGVK().String(), + "gvk", fedResource.TargetGVK().String(), ) if fedResource.Object().GetDeletionTimestamp() != nil { @@ -872,6 +860,7 @@ func (s *SyncController) removeClusterFinalizer(ctx context.Context, cluster *fe func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Context, qualifiedName common.QualifiedName) worker.Result { logger := s.logger.WithValues("cluster-name", qualifiedName.String(), "process", "cluster-cascading-deletion") ctx = klog.NewContext(ctx, logger) + logger.V(3).Info("Starting to reconcile cluster for cascading deletion") clusterLister := s.fedInformerManager.GetFederatedClusterLister() cluster, err := clusterLister.Get(qualifiedName.Name) @@ -951,7 +940,7 @@ func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Contex ) if err == nil && len(objects.Items) > 0 { remainingByGVK[gvk] = strconv.Itoa(len(objects.Items)) - } else if err != nil && !meta.IsNoMatchError(err) { + } else if err != nil && !meta.IsNoMatchError(err) && !apierrors.IsNotFound(err) { remainingByGVK[gvk] = fmt.Sprintf("Unknown (failed to list from cluster: %v)", err) } } diff --git a/pkg/controllers/sync/dispatch/managed.go b/pkg/controllers/sync/dispatch/managed.go index c9b2a7fd..9fcf176f 100644 --- a/pkg/controllers/sync/dispatch/managed.go +++ b/pkg/controllers/sync/dispatch/managed.go @@ -170,12 +170,12 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) recordPropagatedLabelsAndAnnotations(obj) - ctx, cancel := context.WithTimeout(ctx, d.dispatcher.timeout) + ctxWithTimeout, cancel := context.WithTimeout(ctx, d.dispatcher.timeout) defer cancel() keyedLogger.V(1).Info("Creating target object in cluster") obj, err = client.Resource(d.fedResource.TargetGVR()).Namespace(obj.GetNamespace()).Create( - ctx, obj, metav1.CreateOptions{}, + ctxWithTimeout, obj, metav1.CreateOptions{}, ) if err == nil { version := util.ObjectVersion(obj) @@ -188,22 +188,22 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) alreadyExists := apierrors.IsAlreadyExists(err) || d.fedResource.TargetGVK() == corev1.SchemeGroupVersion.WithKind(common.NamespaceKind) && 
apierrors.IsServerTimeout(err) if !alreadyExists { - return d.recordOperationError(ctx, fedcorev1a1.CreationFailed, clusterName, op, err) + return d.recordOperationError(ctxWithTimeout, fedcorev1a1.CreationFailed, clusterName, op, err) } // Attempt to update the existing resource to ensure that it // is labeled as a managed resource. obj, err = client.Resource(d.fedResource.TargetGVR()).Namespace(obj.GetNamespace()).Get( - ctx, obj.GetName(), metav1.GetOptions{}, + ctxWithTimeout, obj.GetName(), metav1.GetOptions{}, ) if err != nil { wrappedErr := errors.Wrapf(err, "failed to retrieve object potentially requiring adoption") - return d.recordOperationError(ctx, fedcorev1a1.RetrievalFailed, clusterName, op, wrappedErr) + return d.recordOperationError(ctxWithTimeout, fedcorev1a1.RetrievalFailed, clusterName, op, wrappedErr) } if d.skipAdoptingResources { return d.recordOperationError( - ctx, + ctxWithTimeout, fedcorev1a1.AlreadyExists, clusterName, op, @@ -212,7 +212,7 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) } d.recordError( - ctx, + ctxWithTimeout, clusterName, op, errors.Errorf("An update will be attempted instead of a creation due to an existing resource"), diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 550b7d40..4bca9fe4 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -29,7 +29,6 @@ import ( "sort" "sync" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -152,28 +151,9 @@ func (r *federatedResource) ComputePlacement(clusters []*fedcorev1a1.FederatedCl return computePlacement(r.federatedObject, clusters) } -// TODO Marshall the template once per reconcile, not per-cluster func (r *federatedResource) ObjectForCluster(clusterName string) (*unstructured.Unstructured, error) { obj := r.template.DeepCopy() - // TODO: do we still need this? The template created by federate controller should never contain finalizers. - notSupportedTemplate := "metadata.%s cannot be set via template to avoid conflicting with controllers " + - "in member clusters. Consider using an override to add or remove elements from this collection." - if len(obj.GetFinalizers()) > 0 { - r.RecordError("FinalizersNotSupported", errors.Errorf(notSupportedTemplate, "finalizers")) - obj.SetFinalizers(nil) - } - - // Avoid having to duplicate these details in the template or have - // the name/namespace vary between host and member clusters. 
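The ctxWithTimeout rename above is the substantive fix: every call in the create-then-maybe-adopt sequence must run under the same bounded context, and shadowing ctx made it easy for a refactor to mix bounded and unbounded calls. A self-contained sketch of the pattern; doCall and the durations are illustrative:

package main

import (
	"context"
	"fmt"
	"time"
)

func doCall(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func operation(ctx context.Context) error {
	// Derive a bounded context under a distinct name so every call in this
	// operation visibly shares one deadline; reusing the name ctx makes it
	// easy to accidentally pass the wrong context after edits.
	ctxWithTimeout, cancel := context.WithTimeout(ctx, 50*time.Millisecond)
	defer cancel()

	if err := doCall(ctxWithTimeout); err != nil {
		return fmt.Errorf("first call: %w", err)
	}
	return doCall(ctxWithTimeout)
}

func main() {
	fmt.Println(operation(context.Background()))
}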
- // TODO: consider omitting these fields in the template created by federate controller - obj.SetName(r.federatedObject.GetName()) - obj.SetNamespace(r.federatedObject.GetNamespace()) - - targetAPIResource := r.typeConfig.GetSourceType() - obj.SetAPIVersion(schema.GroupVersion{Group: targetAPIResource.Group, Version: targetAPIResource.Version}.String()) - obj.SetKind(targetAPIResource.Kind) - if schemautil.IsJobGvk(r.TargetGVK()) { if err := dropJobFields(obj); err != nil { return nil, err diff --git a/pkg/controllers/util/propagatedversion.go b/pkg/controllers/util/propagatedversion.go index 71a1d095..f1f446df 100644 --- a/pkg/controllers/util/propagatedversion.go +++ b/pkg/controllers/util/propagatedversion.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" ) const ( @@ -63,56 +62,6 @@ func ObjectNeedsUpdate( return true } - needUpdate := true - if desiredReplicas, err := utilunstructured.GetInt64FromPath(desiredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { - if currentReplicas, err := utilunstructured.GetInt64FromPath(clusterObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil); err == nil { - if desiredReplicas == nil && currentReplicas == nil || - desiredReplicas != nil && currentReplicas != nil && *desiredReplicas == *currentReplicas { - needUpdate = false - } - } - } - if needUpdate { - return true - } - - needUpdate = true - if desiredMaxSurge, ok, err := unstructured.NestedString(desiredObj.Object, MaxSurgePathSlice...); err == nil { - if currentMaxSurge, ok2, err := unstructured.NestedString(clusterObj.Object, MaxSurgePathSlice...); err == nil && - ok == ok2 { - if desiredMaxSurge == currentMaxSurge { - needUpdate = false - } - } - } else if desiredMaxSurge, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxSurgePathSlice...); err == nil { - if currentMaxSurge, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxSurgePathSlice...); err == nil && ok == ok2 { - if desiredMaxSurge == currentMaxSurge { - needUpdate = false - } - } - } - if needUpdate { - return true - } - - needUpdate = true - if desiredMaxUnavailable, ok, err := unstructured.NestedString(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { - if currentMaxUnavailable, ok2, err := unstructured.NestedString(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && - ok == ok2 { - if desiredMaxUnavailable == currentMaxUnavailable { - needUpdate = false - } - } - } else if desiredMaxUnavailable, ok, err := unstructured.NestedInt64(desiredObj.Object, MaxUnavailablePathSlice...); err == nil { - if currentMaxUnavailable, ok2, err := unstructured.NestedInt64(clusterObj.Object, MaxUnavailablePathSlice...); err == nil && ok == ok2 { - if desiredMaxUnavailable == currentMaxUnavailable { - needUpdate = false - } - } - } - if needUpdate { - return true - } // If versions match and the version is sourced from the // generation field, a further check of metadata equivalency is // required. 
From c502ca2f3b0eaccae04e2b98f232833d3eb23235 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Wed, 19 Jul 2023 04:22:28 +0000 Subject: [PATCH 116/173] refactor: decouple sync from generic client --- pkg/controllers/sync/accessor.go | 13 +-- pkg/controllers/sync/controller.go | 12 +-- pkg/controllers/sync/version/adapter.go | 32 +++++- pkg/controllers/sync/version/cluster.go | 43 ++++++++- pkg/controllers/sync/version/manager.go | 107 +++++++-------------- pkg/controllers/sync/version/namespaced.go | 43 ++++++++- 6 files changed, 148 insertions(+), 102 deletions(-) diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index d9bfcac7..9a85e349 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -22,6 +22,7 @@ are Copyright 2023 The KubeAdmiral Authors. package sync import ( + "context" "fmt" corev1 "k8s.io/api/core/v1" @@ -33,7 +34,7 @@ import ( "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - genericclient "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/version" @@ -45,7 +46,7 @@ import ( // FederatedResourceAccessor provides a way to retrieve and visit // logical federated resources (e.g. FederatedConfigMap) type FederatedResourceAccessor interface { - Run(stopChan <-chan struct{}) + Run(context.Context) HasSynced() bool FederatedResource( qualifiedName common.QualifiedName, @@ -76,7 +77,7 @@ type resourceAccessor struct { func NewFederatedResourceAccessor( logger klog.Logger, controllerConfig *util.ControllerConfig, - client genericclient.Client, + client fedcorev1a1client.CoreV1alpha1Interface, fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, ftcManager informermanager.FederatedTypeConfigManager, @@ -114,9 +115,9 @@ func NewFederatedResourceAccessor( return a } -func (a *resourceAccessor) Run(stopChan <-chan struct{}) { - go a.versionManager.Sync(stopChan) - go a.clusterVersionManager.Sync(stopChan) +func (a *resourceAccessor) Run(ctx context.Context) { + go a.versionManager.Sync(ctx) + go a.clusterVersionManager.Sync(ctx) } func (a *resourceAccessor) HasSynced() bool { diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index af24e6f1..a81dd584 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -48,7 +48,6 @@ import ( "k8s.io/klog/v2" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" "github.com/kubewharf/kubeadmiral/pkg/util/adoption" clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" @@ -127,19 +126,12 @@ type SyncController struct { logger klog.Logger } -/* -TODOs -- generic client: - - version manager (have to experiment with generics, reconsider after refactoring everything else) -*/ - // NewSyncController returns a new sync controller for the configuration func NewSyncController( logger klog.Logger, controllerConfig *util.ControllerConfig, kubeClient kubernetes.Interface, - genericClient 
generic.Client, fedClient fedclient.Interface, fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, @@ -235,7 +227,7 @@ func NewSyncController( } s.fedAccessor = NewFederatedResourceAccessor( - logger, controllerConfig, genericClient, + logger, controllerConfig, fedClient.CoreV1alpha1(), fedObjectInformer, clusterFedObjectInformer, ftcManager, func(qualifiedName common.QualifiedName) { @@ -248,7 +240,7 @@ func NewSyncController( } func (s *SyncController) Run(ctx context.Context) { - s.fedAccessor.Run(ctx.Done()) + s.fedAccessor.Run(ctx) go func() { for { _, shutdown := s.clusterReadinessTransitionQueue.Get() diff --git a/pkg/controllers/sync/version/adapter.go b/pkg/controllers/sync/version/adapter.go index 65bcf84e..30d1992b 100644 --- a/pkg/controllers/sync/version/adapter.go +++ b/pkg/controllers/sync/version/adapter.go @@ -22,20 +22,44 @@ are Copyright 2023 The KubeAdmiral Authors. package version import ( + "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) type VersionAdapter interface { TypeName() string - // Create an empty instance of the version type - NewObject() client.Object - // Create an empty instance of list version type - NewListObject() client.ObjectList + Get( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + namespace, name string, + opts metav1.GetOptions, + ) (client.Object, error) + List( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + namespace string, + opts metav1.ListOptions, + ) (client.ObjectList, error) + Create( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + obj client.Object, + opts metav1.CreateOptions, + ) (client.Object, error) + UpdateStatus( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + obj client.Object, + opts metav1.UpdateOptions, + ) (client.Object, error) + // Create a populated instance of the version type NewVersion( qualifiedName common.QualifiedName, diff --git a/pkg/controllers/sync/version/cluster.go b/pkg/controllers/sync/version/cluster.go index 3d78843d..7ce77573 100644 --- a/pkg/controllers/sync/version/cluster.go +++ b/pkg/controllers/sync/version/cluster.go @@ -22,10 +22,13 @@ are Copyright 2023 The KubeAdmiral Authors. 
package version import ( + "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -35,12 +38,44 @@ func (*clusterVersionAdapter) TypeName() string { return "ClusterPropagatedVersion" } -func (*clusterVersionAdapter) NewListObject() client.ObjectList { - return &fedcorev1a1.ClusterPropagatedVersionList{} +func (*clusterVersionAdapter) Get( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + namespace, name string, + opts metav1.GetOptions, +) (client.Object, error) { + return client.ClusterPropagatedVersions().Get(ctx, name, opts) +} + +func (*clusterVersionAdapter) List( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + namespace string, + opts metav1.ListOptions, +) (client.ObjectList, error) { + return client.ClusterPropagatedVersions().List(ctx, opts) +} + +func (*clusterVersionAdapter) Create( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + obj client.Object, + opts metav1.CreateOptions, +) (client.Object, error) { + return client.ClusterPropagatedVersions().Create( + ctx, obj.(*fedcorev1a1.ClusterPropagatedVersion), opts, + ) } -func (*clusterVersionAdapter) NewObject() client.Object { - return &fedcorev1a1.ClusterPropagatedVersion{} +func (*clusterVersionAdapter) UpdateStatus( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + obj client.Object, + opts metav1.UpdateOptions, +) (client.Object, error) { + return client.ClusterPropagatedVersions().UpdateStatus( + ctx, obj.(*fedcorev1a1.ClusterPropagatedVersion), opts, + ) } func (*clusterVersionAdapter) NewVersion( diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index a531e4bf..93b9d2da 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -30,14 +30,13 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" ) @@ -63,15 +62,14 @@ type VersionManager struct { versions map[string]runtimeclient.Object - // TODO: consider switching to a fedcorev1a1client.Interface or fedcorev1a1client.Interface or dynamic.Interface - client generic.Client + client fedcorev1a1client.CoreV1alpha1Interface logger klog.Logger } func NewVersionManager( logger klog.Logger, - client generic.Client, + client fedcorev1a1client.CoreV1alpha1Interface, namespaced bool, namespace string, ) *VersionManager { @@ -89,12 +87,12 @@ func NewVersionManager( // Sync retrieves propagated versions from the api and loads it into // memory. 
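With Get, List, Create, and UpdateStatus on the adapter, the version manager no longer needs a generic client or empty-object constructors: each adapter binds one concrete API type to a shared set of verbs, and the cluster-scoped variant simply ignores the namespace argument. A reduced, runnable model of that shape; all names here are illustrative, not the project's API:

package main

import (
	"context"
	"fmt"
)

// adapter is a reduced model of VersionAdapter: each implementation binds
// one concrete API type to a common set of verbs.
type adapter interface {
	typeName() string
	get(ctx context.Context, namespace, name string) (string, error)
}

type clusterAdapter struct{}

// Cluster-scoped resources ignore the namespace argument, mirroring how
// ClusterPropagatedVersions().Get takes no namespace.
func (clusterAdapter) typeName() string { return "ClusterPropagatedVersion" }
func (clusterAdapter) get(_ context.Context, _, name string) (string, error) {
	return "cluster/" + name, nil
}

type namespacedAdapter struct{}

func (namespacedAdapter) typeName() string { return "PropagatedVersion" }
func (namespacedAdapter) get(_ context.Context, namespace, name string) (string, error) {
	return namespace + "/" + name, nil
}

func main() {
	// The manager only sees the adapter interface, so the same loop handles
	// both scopes.
	for _, a := range []adapter{clusterAdapter{}, namespacedAdapter{}} {
		v, _ := a.get(context.Background(), "default", "demo")
		fmt.Println(a.typeName(), v)
	}
}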
-func (m *VersionManager) Sync(stopChan <-chan struct{}) { - versionList, ok := m.list(stopChan) +func (m *VersionManager) Sync(ctx context.Context) { + versionList, ok := m.list(ctx) if !ok { return } - ok = m.load(versionList, stopChan) + ok = m.load(ctx, versionList) if !ok { return } @@ -216,18 +214,12 @@ func (m *VersionManager) Delete(qualifiedName common.QualifiedName) { m.Unlock() } -func (m *VersionManager) list(stopChan <-chan struct{}) (runtimeclient.ObjectList, bool) { +func (m *VersionManager) list(ctx context.Context) (runtimeclient.ObjectList, bool) { // Attempt retrieval of list of versions until success or the channel is closed. var versionList runtimeclient.ObjectList - err := wait.PollImmediateInfinite(1*time.Second, func() (bool, error) { - select { - case <-stopChan: - m.logger.V(4).Info("Halting version manager list due to closed stop channel") - return false, errors.New("") - default: - } - versionList = m.adapter.NewListObject() - err := m.client.List(context.TODO(), versionList, m.namespace) + err := wait.PollImmediateInfiniteWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) { + var err error + versionList, err = m.adapter.List(ctx, m.client, m.namespace, metav1.ListOptions{}) if err != nil { m.logger.Error(err, "Failed to list propagated versions") // Do not return the error to allow the operation to be retried. @@ -244,14 +236,14 @@ func (m *VersionManager) list(stopChan <-chan struct{}) (runtimeclient.ObjectLis // load processes a list of versions into in-memory cache. Since the // version manager should not be used in advance of HasSynced // returning true, locking is assumed to be unnecessary. -func (m *VersionManager) load(versionList runtimeclient.ObjectList, stopChan <-chan struct{}) bool { +func (m *VersionManager) load(ctx context.Context, versionList runtimeclient.ObjectList) bool { objs, err := meta.ExtractList(versionList) if err != nil { return false } for _, obj := range objs { select { - case <-stopChan: + case <-ctx.Done(): m.logger.V(4).Info("Halting version manager load due to closed stop channel") return false default: @@ -281,22 +273,20 @@ func (m *VersionManager) versionQualifiedName(qualifiedName common.QualifiedName // resource is updated by at most one thread at a time. This should // guarantee safe manipulation of an object retrieved from the // version map. -func (m *VersionManager) writeVersion(obj pkgruntime.Object, qualifiedName common.QualifiedName) error { +func (m *VersionManager) writeVersion(obj runtimeclient.Object, qualifiedName common.QualifiedName) error { key := qualifiedName.String() adapterType := m.adapter.TypeName() keyedLogger := m.logger.WithValues("version-qualified-name", key) - resourceVersion, err := getResourceVersion(obj) - if err != nil { - return errors.Wrapf(err, "Failed to retrieve the resourceVersion from %s %q", adapterType, key) - } + resourceVersion := getResourceVersion(obj) refreshVersion := false // TODO Centralize polling interval and duration waitDuration := 30 * time.Second - err = wait.PollImmediate(100*time.Millisecond, waitDuration, func() (bool, error) { + err := wait.PollImmediate(100*time.Millisecond, waitDuration, func() (bool, error) { + var err error + if refreshVersion { // Version was written to the API by another process after the last manager write. 
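Threading the context into Sync also lets the retry loop lean on wait.PollImmediateInfiniteWithContext: transient list errors are absorbed by returning (false, nil), and cancellation ends the loop without a hand-rolled select on a stop channel. A standalone sketch of the same pattern:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()

	attempts := 0
	// The condition func reports success on its second try; returning
	// (false, nil) keeps polling, while ctx cancellation ends the loop with
	// the context's error.
	err := wait.PollImmediateInfiniteWithContext(ctx, 100*time.Millisecond,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 2, nil
		})
	fmt.Println(attempts, err)
}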
- var err error resourceVersion, err = m.getResourceVersionFromAPI(qualifiedName) if err != nil { keyedLogger.Error(err, "Failed to refresh the resourceVersion from the API") @@ -308,15 +298,10 @@ func (m *VersionManager) writeVersion(obj pkgruntime.Object, qualifiedName commo if resourceVersion == "" { // Version resource needs to be created - createdObj := obj.DeepCopyObject() - err := setResourceVersion(createdObj, "") - if err != nil { - keyedLogger.Error(err, "Failed to clear the resourceVersion") - return false, nil - } - + createdObj := obj.DeepCopyObject().(runtimeclient.Object) + setResourceVersion(createdObj, "") keyedLogger.V(2).Info("Creating resourceVersion") - err = m.client.Create(context.TODO(), createdObj.(runtimeclient.Object)) + createdObj, err = m.adapter.Create(context.TODO(), m.client, createdObj, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { keyedLogger.V(3).Info("ResourceVersion was created by another process. Will refresh the resourceVersion and attempt to update") refreshVersion = true @@ -334,24 +319,16 @@ func (m *VersionManager) writeVersion(obj pkgruntime.Object, qualifiedName commo } // Update the resource version that will be used for update. - resourceVersion, err = getResourceVersion(createdObj) - if err != nil { - keyedLogger.Error(err, "Failed to retrieve the resourceVersion") - return false, nil - } + resourceVersion = getResourceVersion(createdObj) } // Update the status of an existing object - updatedObj := obj.DeepCopyObject() - err := setResourceVersion(updatedObj, resourceVersion) - if err != nil { - keyedLogger.Error(err, "Failed to set the resourceVersion") - return false, nil - } + updatedObj := obj.DeepCopyObject().(runtimeclient.Object) + setResourceVersion(updatedObj, resourceVersion) keyedLogger.V(2).Info("Updating the status") - err = m.client.UpdateStatus(context.TODO(), updatedObj.(runtimeclient.Object)) + updatedObj, err = m.adapter.UpdateStatus(context.TODO(), m.client, updatedObj, metav1.UpdateOptions{}) if apierrors.IsConflict(err) { keyedLogger.V(3).Info("ResourceVersion was updated by another process. Will refresh the resourceVersion and retry the update") refreshVersion = true @@ -378,15 +355,8 @@ func (m *VersionManager) writeVersion(obj pkgruntime.Object, qualifiedName commo // refresh the resource version if necessary. // Update the version resource - resourceVersion, err = getResourceVersion(updatedObj) - if err != nil { - keyedLogger.Error(err, "Failed to retrieve the resourceVersion") - return true, nil - } - err = setResourceVersion(obj, resourceVersion) - if err != nil { - keyedLogger.Error(err, "Failed to set the resourceVersion") - } + resourceVersion = getResourceVersion(updatedObj) + setResourceVersion(obj, resourceVersion) return true, nil }) @@ -397,31 +367,20 @@ func (m *VersionManager) writeVersion(obj pkgruntime.Object, qualifiedName commo } func (m *VersionManager) getResourceVersionFromAPI(qualifiedName common.QualifiedName) (string, error) { - m.logger.WithValues("version-qualified-name", qualifiedName). 
- V(2).Info("Retrieving resourceVersion from the API") - obj := m.adapter.NewObject() - err := m.client.Get(context.TODO(), obj, qualifiedName.Namespace, qualifiedName.Name) + m.logger.V(2).Info("Retrieving resourceVersion from the API", "version-qualified-name", qualifiedName) + obj, err := m.adapter.Get(context.TODO(), m.client, qualifiedName.Namespace, qualifiedName.Name, metav1.GetOptions{}) if err != nil { return "", err } - return getResourceVersion(obj) + return getResourceVersion(obj), nil } -func getResourceVersion(obj pkgruntime.Object) (string, error) { - metaAccessor, err := meta.Accessor(obj) - if err != nil { - return "", err - } - return metaAccessor.GetResourceVersion(), nil +func getResourceVersion(obj runtimeclient.Object) string { + return obj.GetResourceVersion() } -func setResourceVersion(obj pkgruntime.Object, resourceVersion string) error { - metaAccessor, err := meta.Accessor(obj) - if err != nil { - return err - } - metaAccessor.SetResourceVersion(resourceVersion) - return nil +func setResourceVersion(obj runtimeclient.Object, resourceVersion string) { + obj.SetResourceVersion(resourceVersion) } func ownerReferenceForFederatedObject(obj fedcorev1a1.GenericFederatedObject) metav1.OwnerReference { diff --git a/pkg/controllers/sync/version/namespaced.go b/pkg/controllers/sync/version/namespaced.go index 3047c2d4..a602cf56 100644 --- a/pkg/controllers/sync/version/namespaced.go +++ b/pkg/controllers/sync/version/namespaced.go @@ -22,10 +22,13 @@ are Copyright 2023 The KubeAdmiral Authors. package version import ( + "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -35,12 +38,44 @@ func (*namespacedVersionAdapter) TypeName() string { return "PropagatedVersion" } -func (*namespacedVersionAdapter) NewListObject() client.ObjectList { - return &fedcorev1a1.PropagatedVersionList{} +func (*namespacedVersionAdapter) Get( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + namespace, name string, + opts metav1.GetOptions, +) (client.Object, error) { + return client.PropagatedVersions(namespace).Get(ctx, name, opts) +} + +func (*namespacedVersionAdapter) List( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + namespace string, + opts metav1.ListOptions, +) (client.ObjectList, error) { + return client.PropagatedVersions(namespace).List(ctx, opts) +} + +func (*namespacedVersionAdapter) Create( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + obj client.Object, + opts metav1.CreateOptions, +) (client.Object, error) { + return client.PropagatedVersions(obj.GetNamespace()).Create( + ctx, obj.(*fedcorev1a1.PropagatedVersion), opts, + ) } -func (*namespacedVersionAdapter) NewObject() client.Object { - return &fedcorev1a1.PropagatedVersion{} +func (*namespacedVersionAdapter) UpdateStatus( + ctx context.Context, + client fedcorev1a1client.CoreV1alpha1Interface, + obj client.Object, + opts metav1.UpdateOptions, +) (client.Object, error) { + return client.PropagatedVersions(obj.GetNamespace()).UpdateStatus( + ctx, obj.(*fedcorev1a1.PropagatedVersion), opts, + ) } func (*namespacedVersionAdapter) NewVersion( From ec4328198454cfb471c628b94ebfe0e02c8a13d8 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Wed, 19 Jul 2023 06:38:09 +0000 Subject: [PATCH 117/173] refactor: propagated version manager constructors --- pkg/controllers/sync/accessor.go | 7 ++----- pkg/controllers/sync/version/manager.go | 21 ++++++++++++++++++--- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index 9a85e349..416d4d40 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -99,17 +99,14 @@ func NewFederatedResourceAccessor( fedObjectInformer.Informer().AddEventHandler(handler) clusterFedObjectInformer.Informer().AddEventHandler(handler) - a.versionManager = version.NewVersionManager( + a.versionManager = version.NewNamespacedVersionManager( logger, client, - true, controllerConfig.TargetNamespace, ) - a.clusterVersionManager = version.NewVersionManager( + a.clusterVersionManager = version.NewClusterVersionManager( logger, client, - false, - controllerConfig.TargetNamespace, ) return a diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index 93b9d2da..ab32055a 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -67,13 +67,12 @@ type VersionManager struct { logger klog.Logger } -func NewVersionManager( +func NewNamespacedVersionManager( logger klog.Logger, client fedcorev1a1client.CoreV1alpha1Interface, - namespaced bool, namespace string, ) *VersionManager { - adapter := NewVersionAdapter(namespaced) + adapter := NewVersionAdapter(true) v := &VersionManager{ logger: logger.WithValues("origin", "version-manager", "type-name", adapter.TypeName()), namespace: namespace, @@ -85,6 +84,22 @@ func NewVersionManager( return v } +func NewClusterVersionManager( + logger klog.Logger, + client fedcorev1a1client.CoreV1alpha1Interface, +) *VersionManager { + adapter := NewVersionAdapter(false) + v := &VersionManager{ + logger: logger.WithValues("origin", "version-manager", "type-name", adapter.TypeName()), + namespace: "", + adapter: adapter, + versions: make(map[string]runtimeclient.Object), + client: client, + } + + return v +} + // Sync retrieves propagated versions from the api and loads it into // memory. func (m *VersionManager) Sync(ctx context.Context) { From ec1de65aff7b5e1afdde5618fb78b6f6ab97c22d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Wed, 19 Jul 2023 06:41:50 +0000 Subject: [PATCH 118/173] docs: propagated version description --- ...eadmiral.io_clusterpropagatedversions.yaml | 2 +- ...ore.kubeadmiral.io_propagatedversions.yaml | 2 +- .../core/v1alpha1/types_propgatedversion.go | 32 ++++++++----------- 3 files changed, 16 insertions(+), 20 deletions(-) diff --git a/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml b/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml index a1b58028..b9d3601d 100644 --- a/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml +++ b/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml @@ -19,7 +19,7 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: ClusterPropagatedVersion holds version information about the state propagated from KubeFed APIs (configured by FederatedTypeConfig resources) to member clusters. The name of a ClusterPropagatedVersion encodes the kind and name of the resource it stores information for (i.e. -). 
If a target resource has a populated metadata.Generation field, the generation will be stored with a prefix of `gen:` as the version for the cluster. If metadata.Generation is not available, metadata.ResourceVersion will be stored with a prefix of `rv:` as the version for the cluster. + description: ClusterPropagatedVersion holds version information about the state propagated from ClusterFederatedObject to member clusters. The name of a ClusterPropagatedVersion is the same as its ClusterFederatedObject. If a target resource has a populated metadata.Generation field, the generation will be stored with a prefix of `gen:` as the version for the cluster. If metadata.Generation is not available, metadata.ResourceVersion will be stored with a prefix of `rv:` as the version for the cluster. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' diff --git a/config/crds/core.kubeadmiral.io_propagatedversions.yaml b/config/crds/core.kubeadmiral.io_propagatedversions.yaml index 52940220..c638a9ed 100644 --- a/config/crds/core.kubeadmiral.io_propagatedversions.yaml +++ b/config/crds/core.kubeadmiral.io_propagatedversions.yaml @@ -19,7 +19,7 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: PropagatedVersion holds version information about the state propagated from KubeFed APIs (configured by FederatedTypeConfig resources) to member clusters. The name of a PropagatedVersion encodes the kind and name of the resource it stores information for (i.e. -). If a target resource has a populated metadata.Generation field, the generation will be stored with a prefix of `gen:` as the version for the cluster. If metadata.Generation is not available, metadata.ResourceVersion will be stored with a prefix of `rv:` as the version for the cluster. + description: PropagatedVersion holds version information about the state propagated from FederatedObject to member clusters. The name of a PropagatedVersion is the same as its FederatedObject. If a target resource has a populated metadata.Generation field, the generation will be stored with a prefix of `gen:` as the version for the cluster. If metadata.Generation is not available, metadata.ResourceVersion will be stored with a prefix of `rv:` as the version for the cluster. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' diff --git a/pkg/apis/core/v1alpha1/types_propgatedversion.go b/pkg/apis/core/v1alpha1/types_propgatedversion.go index ee362673..04d1f5b8 100644 --- a/pkg/apis/core/v1alpha1/types_propgatedversion.go +++ b/pkg/apis/core/v1alpha1/types_propgatedversion.go @@ -30,15 +30,13 @@ import ( // +kubebuilder:resource:path=propagatedversions // +kubebuilder:subresource:status -// PropagatedVersion holds version information about the state -// propagated from KubeFed APIs (configured by FederatedTypeConfig -// resources) to member clusters. The name of a PropagatedVersion -// encodes the kind and name of the resource it stores information for -// (i.e. -). 
If a target resource has -// a populated metadata.Generation field, the generation will be -// stored with a prefix of `gen:` as the version for the cluster. If -// metadata.Generation is not available, metadata.ResourceVersion will -// be stored with a prefix of `rv:` as the version for the cluster. +// PropagatedVersion holds version information about the state propagated from +// FederatedObject to member clusters. The name of a PropagatedVersion is the +// same as its FederatedObject. If a target resource has a populated +// metadata.Generation field, the generation will be stored with a prefix of +// `gen:` as the version for the cluster. If metadata.Generation is not +// available, metadata.ResourceVersion will be stored with a prefix of `rv:` as +// the version for the cluster. type PropagatedVersion struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -83,15 +81,13 @@ type ClusterObjectVersion struct { // +kubebuilder:resource:path=clusterpropagatedversions,scope=Cluster // +kubebuilder:subresource:status -// ClusterPropagatedVersion holds version information about the state -// propagated from KubeFed APIs (configured by FederatedTypeConfig -// resources) to member clusters. The name of a ClusterPropagatedVersion -// encodes the kind and name of the resource it stores information for -// (i.e. -). If a target resource has -// a populated metadata.Generation field, the generation will be -// stored with a prefix of `gen:` as the version for the cluster. If -// metadata.Generation is not available, metadata.ResourceVersion will -// be stored with a prefix of `rv:` as the version for the cluster. +// ClusterPropagatedVersion holds version information about the state propagated +// from ClusterFederatedObject to member clusters. The name of a +// ClusterPropagatedVersion is the same as its ClusterFederatedObject. If a +// target resource has a populated metadata.Generation field, the generation +// will be stored with a prefix of `gen:` as the version for the cluster. If +// metadata.Generation is not available, metadata.ResourceVersion will be stored +// with a prefix of `rv:` as the version for the cluster. 
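The rewritten doc comments pin down the version encoding both types share: prefer metadata.Generation with a gen: prefix, and fall back to metadata.ResourceVersion with an rv: prefix. A sketch of that rule exactly as described; the helper name is illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// objectVersion renders the propagated-version string as the doc comments
// describe: the generation when populated, the resource version otherwise.
func objectVersion(obj metav1.Object) string {
	if gen := obj.GetGeneration(); gen > 0 {
		return fmt.Sprintf("gen:%d", gen)
	}
	return fmt.Sprintf("rv:%s", obj.GetResourceVersion())
}

func main() {
	withGen := &metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{Generation: 7}}
	withoutGen := &metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "12345"}}
	fmt.Println(objectVersion(withGen), objectVersion(withoutGen)) // gen:7 rv:12345
}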
type ClusterPropagatedVersion struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` From b002e477cc12ab47c0b0c73dd753ecf1fcebbf8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Wed, 19 Jul 2023 10:04:38 +0000 Subject: [PATCH 119/173] fix(sync): enqueue with delay on cluster readiness transition --- pkg/controllers/sync/controller.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index a81dd584..e6415a55 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -214,12 +214,22 @@ func NewSyncController( }, &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { - // Reconcile all federated objects when cluster readiness changes + // Reconcile all federated objects when cluster becomes available return oldCluster != nil && newCluster != nil && - clusterutil.IsClusterReady(&oldCluster.Status) != clusterutil.IsClusterReady(&newCluster.Status) + !clusterutil.IsClusterReady(&oldCluster.Status) && clusterutil.IsClusterReady(&newCluster.Status) }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { - s.clusterReadinessTransitionQueue.Add(struct{}{}) + s.clusterReadinessTransitionQueue.AddAfter(struct{}{}, s.clusterAvailableDelay) + }, + }, + &informermanager.ClusterEventHandler{ + Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { + // Reconcile all federated objects when cluster becomes unavailable + return oldCluster != nil && newCluster != nil && + clusterutil.IsClusterReady(&oldCluster.Status) && !clusterutil.IsClusterReady(&newCluster.Status) + }, + Callback: func(cluster *fedcorev1a1.FederatedCluster) { + s.clusterReadinessTransitionQueue.AddAfter(struct{}{}, s.clusterUnavailableDelay) }, }, ); err != nil { From 77a0eafee82acbf8242408d0139753745377b4f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 20 Jul 2023 10:12:45 +0000 Subject: [PATCH 120/173] chore(sync): adapt to arch changes --- .../app/controllermanager.go | 2 + cmd/controller-manager/app/core.go | 33 +++++++- .../v1alpha1/extensions_federatedobject.go | 16 ++-- pkg/controllers/common/constants.go | 67 ++++++++------- pkg/controllers/federate/util.go | 1 - pkg/controllers/sync/accessor.go | 11 ++- pkg/controllers/sync/controller.go | 51 ++++++----- .../sync/dispatch/checkunmanaged.go | 10 +-- pkg/controllers/sync/dispatch/managed.go | 15 ++-- pkg/controllers/sync/dispatch/operation.go | 1 - pkg/controllers/sync/dispatch/retain.go | 20 ++--- pkg/controllers/sync/dispatch/unmanaged.go | 12 +-- pkg/controllers/sync/placement.go | 1 - pkg/controllers/sync/resource.go | 23 ++--- pkg/controllers/sync/status/status.go | 1 - pkg/controllers/sync/version/adapter.go | 1 - pkg/controllers/sync/version/cluster.go | 1 - pkg/controllers/sync/version/manager.go | 7 +- pkg/controllers/sync/version/namespaced.go | 1 - pkg/controllers/util/schema/gvk.go | 49 ----------- .../cascadingdeletion/annotation.go} | 3 +- pkg/util/eventsink/eventsink.go | 22 ++--- pkg/util/eventsink/eventsink_test.go | 81 +++++++++++++----- pkg/{controllers/util => util/meta}/meta.go | 2 +- .../util => util/meta}/meta_test.go | 2 +- pkg/util/overrides/overrides.go | 84 +++++++++++++++++++ .../propagatedversion}/propagatedversion.go | 3 +- 27 files changed, 305 insertions(+), 215 deletions(-) delete mode 100644 
pkg/controllers/util/schema/gvk.go rename pkg/{controllers/util/cascadingdeleteannotation.go => util/cascadingdeletion/annotation.go} (97%) rename pkg/{controllers/util => util/meta}/meta.go (99%) rename pkg/{controllers/util => util/meta}/meta_test.go (99%) create mode 100644 pkg/util/overrides/overrides.go rename pkg/{controllers/util => util/propagatedversion}/propagatedversion.go (99%) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index fdb5f2ef..b8211e92 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -43,6 +43,7 @@ const ( NamespaceAutoPropagationControllerName = "nsautoprop" StatusControllerName = "status" SchedulerName = "scheduler" + SyncControllerName = "sync" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ @@ -53,6 +54,7 @@ var knownControllers = map[string]controllermanager.StartControllerFunc{ StatusControllerName: startStatusController, FederatedClusterControllerName: startFederatedClusterController, SchedulerName: startScheduler, + SyncControllerName: startSyncController, } var controllersDisabledByDefault = sets.New[string]() diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index 442bc45c..9be76235 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -25,12 +25,13 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllermanager" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" + "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" - "github.com/kubewharf/kubeadmiral/pkg/controllers/status" - "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" + "github.com/kubewharf/kubeadmiral/pkg/controllers/status" + "github.com/kubewharf/kubeadmiral/pkg/controllers/sync" ) func startFederateController( @@ -207,3 +208,31 @@ func startScheduler( return scheduler, nil } + +func startSyncController( + ctx context.Context, + controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { + syncController, err := sync.NewSyncController( + controllerCtx.KubeClientset, + controllerCtx.FedClientset, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedObjects(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + controllerCtx.InformerManager, + controllerCtx.FederatedInformerManager, + controllerCtx.FedSystemNamespace, + controllerCtx.TargetNamespace, + controllerCtx.ClusterAvailableDelay, + controllerCtx.ClusterUnavailableDelay, + klog.Background(), + controllerCtx.WorkerCount, + controllerCtx.Metrics, + ) + if err != nil { + return nil, fmt.Errorf("error creating sync controller: %w", err) + } + + go syncController.Run(ctx) + + return syncController, nil +} diff --git a/pkg/apis/core/v1alpha1/extensions_federatedobject.go b/pkg/apis/core/v1alpha1/extensions_federatedobject.go index f7ec115a..0ba91a1f 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedobject.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedobject.go @@ -17,13 +17,13 @@ limitations under the License. 
package v1alpha1 import ( - "encoding/json" "reflect" "sort" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + k8sjson "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/sets" ) @@ -222,16 +222,12 @@ func (spec *GenericFederatedObjectSpec) GetTemplateAsUnstructured() (*unstructur return template, nil } -// GetTemplateGVK returns the GVK of the FederatedObject's source object by parsing the FederatedObject's template. -func (spec *GenericFederatedObjectSpec) GetTemplateGVK() (schema.GroupVersionKind, error) { - type partialTypeMetadata struct { - metav1.TypeMeta `json:",inline"` - } - metadata := &partialTypeMetadata{} - if err := json.Unmarshal(spec.Template.Raw, metadata); err != nil { - return schema.GroupVersionKind{}, nil +func (spec *GenericFederatedObjectSpec) GetTemplateMetadata() (*metav1.PartialObjectMetadata, error) { + metadata := &metav1.PartialObjectMetadata{} + if err := k8sjson.Unmarshal(spec.Template.Raw, metadata); err != nil { + return nil, err } - return metadata.GroupVersionKind(), nil + return metadata, nil } // Follower extensions diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index a795581d..12d280d8 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -22,6 +22,7 @@ package common import ( appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" ) @@ -32,29 +33,6 @@ const ( FederateControllerPrefix = "federate.controller." + DefaultPrefix ) -const ( - NamespaceResource = "namespaces" - DeploymentResource = "deployments" - DaemonSetResource = "daemonsets" - ConfigMapResource = "configmaps" - SecretResource = "secrets" - - NamespaceKind = "Namespace" - DeploymentKind = "Deployment" - StatefulSetKind = "StatefulSet" - DaemonSetKind = "DaemonSet" - JobKind = "Job" - CronJobKind = "CronJob" - ConfigMapKind = "ConfigMap" - SecretKind = "Secret" - ServiceKind = "Service" - ServiceAccountKind = "ServiceAccount" - IngressKind = "Ingress" - PersistentVolumeKind = "PersistentVolume" - PersistentVolumeClaimKind = "PersistentVolumeClaim" - PodKind = "Pod" -) - // The following consts are spec fields used to interact with unstructured resources const ( @@ -110,9 +88,6 @@ const ( NoSchedulingAnnotation = DefaultPrefix + "no-scheduling" - // FederatedObjectAnnotation indicates that the object was created by the federate controller. - FederatedObjectAnnotation = DefaultPrefix + "federated-object" - // RetainReplicasAnnotation indicates that the replicas field of the cluster objects should be retained during propagation. 
RetainReplicasAnnotation = DefaultPrefix + "retain-replicas" @@ -155,12 +130,46 @@ const ( ClusterServiceAccountCAKey = "service-account-ca-data" ) +const ( + NamespaceResource = "namespaces" + DeploymentResource = "deployments" + DaemonSetResource = "daemonsets" + ConfigMapResource = "configmaps" + SecretResource = "secrets" + + NamespaceKind = "Namespace" + DeploymentKind = "Deployment" + StatefulSetKind = "StatefulSet" + DaemonSetKind = "DaemonSet" + JobKind = "Job" + CronJobKind = "CronJob" + ConfigMapKind = "ConfigMap" + SecretKind = "Secret" + ServiceKind = "Service" + ServiceAccountKind = "ServiceAccount" + IngressKind = "Ingress" + PersistentVolumeKind = "PersistentVolume" + PersistentVolumeClaimKind = "PersistentVolumeClaim" + PodKind = "Pod" +) + +var ( + ServiceGVK = corev1.SchemeGroupVersion.WithKind(ServiceKind) + ServiceAccountGVK = corev1.SchemeGroupVersion.WithKind(ServiceAccountKind) + PersistentVolumeGVK = corev1.SchemeGroupVersion.WithKind(PersistentVolumeKind) + PersistentVolumeClaimGVK = corev1.SchemeGroupVersion.WithKind(PersistentVolumeClaimKind) + PodGVK = corev1.SchemeGroupVersion.WithKind(PodKind) + + JobGVK = batchv1.SchemeGroupVersion.WithKind(JobKind) +) + var ( + NamespaceGVR = corev1.SchemeGroupVersion.WithResource(NamespaceResource) + ConfigMapGVR = corev1.SchemeGroupVersion.WithResource(ConfigMapResource) + SecretGVR = corev1.SchemeGroupVersion.WithResource(SecretResource) + DeploymentGVR = appsv1.SchemeGroupVersion.WithResource(DeploymentResource) DaemonSetGVR = appsv1.SchemeGroupVersion.WithResource(DaemonSetResource) - NamespaceGVR = corev1.SchemeGroupVersion.WithResource(NamespaceResource) - ConfigMapGVR = corev1.SchemeGroupVersion.WithResource(ConfigMapResource) - SecretGVR = corev1.SchemeGroupVersion.WithResource(SecretResource) ) // MaxFederatedObjectNameLength defines the max length of a federated object name. diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index c6151f61..bdbdd377 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -305,7 +305,6 @@ func classifyAnnotations(annotations map[string]string) ( templateAnnotations map[string]string, ) { federatedAnnotations, templateAnnotations = classifyStringMap(annotations, classifyAnnotation) - federatedAnnotations[common.FederatedObjectAnnotation] = "1" return federatedAnnotations, templateAnnotations } diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index 416d4d40..d98c53d8 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. 
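GetTemplateMetadata, introduced a few hunks above, also quietly fixes GetTemplateGVK's bug of returning a nil error on unmarshal failure, and it surfaces more than the GVK: decoding into metav1.PartialObjectMetadata yields the type and object metadata in one pass. A standalone sketch; the template JSON is illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/json"
)

func main() {
	raw := []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demo","namespace":"default"}}`)

	// PartialObjectMetadata decodes only TypeMeta and ObjectMeta, so it is a
	// cheap way to recover the GVK and name from an arbitrary template.
	metadata := &metav1.PartialObjectMetadata{}
	if err := json.Unmarshal(raw, metadata); err != nil {
		panic(err)
	}
	fmt.Println(metadata.GroupVersionKind(), metadata.Name, metadata.Namespace)
}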
@@ -38,7 +37,7 @@ import ( fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/version" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" ) @@ -76,7 +75,7 @@ type resourceAccessor struct { func NewFederatedResourceAccessor( logger klog.Logger, - controllerConfig *util.ControllerConfig, + fedSystemNamespace, targetNamespace string, client fedcorev1a1client.CoreV1alpha1Interface, fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, @@ -85,7 +84,7 @@ func NewFederatedResourceAccessor( eventRecorder record.EventRecorder, ) FederatedResourceAccessor { a := &resourceAccessor{ - fedNamespace: controllerConfig.FedSystemNamespace, + fedNamespace: fedSystemNamespace, fedObjectInformer: fedObjectInformer, clusterFedObjectInformer: clusterFedObjectInformer, ftcManager: ftcManager, @@ -93,7 +92,7 @@ func NewFederatedResourceAccessor( logger: logger.WithValues("origin", "resource-accessor"), } - handler := util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + handler := eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { enqueue(common.NewQualifiedName(o)) }) fedObjectInformer.Informer().AddEventHandler(handler) @@ -102,7 +101,7 @@ func NewFederatedResourceAccessor( a.versionManager = version.NewNamespacedVersionManager( logger, client, - controllerConfig.TargetNamespace, + targetNamespace, ) a.clusterVersionManager = version.NewClusterVersionManager( logger, diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index e6415a55..73b5e6c7 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. 
@@ -49,10 +48,13 @@ import ( fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" "github.com/kubewharf/kubeadmiral/pkg/util/adoption" + "github.com/kubewharf/kubeadmiral/pkg/util/cascadingdeletion" clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/naming" "github.com/kubewharf/kubeadmiral/pkg/util/orphaning" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" @@ -60,13 +62,12 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/dispatch" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/status" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" - finalizersutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + finalizersutil "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( @@ -128,9 +129,6 @@ type SyncController struct { // NewSyncController returns a new sync controller for the configuration func NewSyncController( - logger klog.Logger, - controllerConfig *util.ControllerConfig, - kubeClient kubernetes.Interface, fedClient fedclient.Interface, @@ -139,6 +137,13 @@ func NewSyncController( ftcManager informermanager.FederatedTypeConfigManager, fedInformerManager informermanager.FederatedInformerManager, + + fedSystemNamespace, targetNamespace string, + clusterAvailableDelay, clusterUnavailableDelay time.Duration, + + logger klog.Logger, + workerCount int, + metrics stats.Metrics, ) (*SyncController, error) { recorder := eventsink.NewDefederatingRecorderMux(kubeClient, SyncControllerName, 4) logger = klog.LoggerWithValues(logger, "controller", SyncControllerName) @@ -146,15 +151,15 @@ func NewSyncController( fedClient: fedClient, ftcManager: ftcManager, fedInformerManager: fedInformerManager, - clusterAvailableDelay: controllerConfig.ClusterAvailableDelay, - clusterUnavailableDelay: controllerConfig.ClusterUnavailableDelay, + clusterAvailableDelay: clusterAvailableDelay, + clusterUnavailableDelay: clusterUnavailableDelay, reconcileOnClusterChangeDelay: time.Second * 3, memberObjectEnqueueDelay: time.Second * 10, recheckAfterDispatchDelay: time.Second * 10, ensureDeletionRecheckDelay: time.Second * 5, cascadingDeletionRecheckDelay: time.Second * 10, eventRecorder: recorder, - metrics: controllerConfig.Metrics, + metrics: metrics, logger: logger, } @@ -163,8 +168,8 @@ func NewSyncController( nil, s.reconcile, worker.RateLimiterOptions{}, - controllerConfig.WorkerCount, - controllerConfig.Metrics, + workerCount, + metrics, ) s.clusterCascadingDeletionWorker = worker.NewReconcileWorker[common.QualifiedName]( @@ -173,7 +178,7 @@ func NewSyncController( s.reconcileClusterForCascadingDeletion, worker.RateLimiterOptions{}, 1, - controllerConfig.Metrics, + metrics, ) // Build queue for triggering cluster reconciliations. 
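worker.NewReconcileWorker[common.QualifiedName] above is a generics-based work queue keyed by a comparable type. The toy version below shows the shape of that API under stated assumptions; it deliberately omits the rate limiting, retries, and parallelism the real pkg/util/worker provides.

package main

import (
    "context"
    "fmt"
    "sync"
)

// Result and ReconcileFunc mirror the rough shape of pkg/util/worker;
// this is a simplified stand-in, not the real implementation.
type Result struct{ Success bool }

type ReconcileFunc[Key comparable] func(ctx context.Context, key Key) Result

type reconcileWorker[Key comparable] struct {
    mu        sync.Mutex
    pending   []Key
    reconcile ReconcileFunc[Key]
}

func newReconcileWorker[Key comparable](fn ReconcileFunc[Key]) *reconcileWorker[Key] {
    return &reconcileWorker[Key]{reconcile: fn}
}

func (w *reconcileWorker[Key]) Enqueue(key Key) {
    w.mu.Lock()
    defer w.mu.Unlock()
    w.pending = append(w.pending, key)
}

// Run drains the queue once; the real worker loops with configurable workers.
func (w *reconcileWorker[Key]) Run(ctx context.Context) {
    w.mu.Lock()
    keys := w.pending
    w.pending = nil
    w.mu.Unlock()
    for _, key := range keys {
        w.reconcile(ctx, key)
    }
}

func main() {
    w := newReconcileWorker[string](func(ctx context.Context, key string) Result {
        fmt.Println("reconciling", key)
        return Result{Success: true}
    })
    w.Enqueue("default/my-deployment-deployments.apps")
    w.Run(context.Background())
}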
@@ -182,7 +187,7 @@ func NewSyncController( if err := s.fedInformerManager.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{ Predicate: informermanager.RegisterOncePredicate, Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { - return util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + return eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { obj := o.(*unstructured.Unstructured) ftc, exists := s.ftcManager.GetResourceFTC(obj.GroupVersionKind()) @@ -192,7 +197,7 @@ func NewSyncController( federatedName := common.QualifiedName{ Namespace: obj.GetNamespace(), - Name: util.GenerateFederatedObjectName(obj.GetName(), ftc.GetName()), + Name: naming.GenerateFederatedObjectName(obj.GetName(), ftc.GetName()), } s.worker.EnqueueWithDelay(federatedName, s.memberObjectEnqueueDelay) }) @@ -237,7 +242,9 @@ func NewSyncController( } s.fedAccessor = NewFederatedResourceAccessor( - logger, controllerConfig, fedClient.CoreV1alpha1(), + logger, + fedSystemNamespace, targetNamespace, + fedClient.CoreV1alpha1(), fedObjectInformer, clusterFedObjectInformer, ftcManager, func(qualifiedName common.QualifiedName) { @@ -291,6 +298,10 @@ func (s *SyncController) HasSynced() bool { return true } +func (s *SyncController) IsControllerReady() bool { + return s.HasSynced() +} + func (s *SyncController) getClusterClient(clusterName string) (dynamic.Interface, error) { if client, exists := s.fedInformerManager.GetClusterClient(clusterName); exists { return client, nil @@ -389,7 +400,7 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat for _, cluster := range clusters { clusterName := cluster.Name isSelectedCluster := selectedClusterNames.Has(clusterName) - isCascadingDeletionTriggered := cluster.GetDeletionTimestamp() != nil && util.IsCascadingDeleteEnabled(cluster) + isCascadingDeletionTriggered := cluster.GetDeletionTimestamp() != nil && cascadingdeletion.IsCascadingDeleteEnabled(cluster) shouldBeDeleted := !isSelectedCluster || isCascadingDeletionTriggered if !clusterutil.IsClusterReady(&cluster.Status) { @@ -434,7 +445,7 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat dispatcher.RecordStatus(clusterName, fedcorev1a1.WaitingForRemoval) continue } - if cluster.GetDeletionTimestamp() != nil && !util.IsCascadingDeleteEnabled(cluster) { + if cluster.GetDeletionTimestamp() != nil && !cascadingdeletion.IsCascadingDeleteEnabled(cluster) { // If cluster is terminating and cascading-delete is disabled, // disallow deletion to preserve cluster object. // This could happen right after a cluster is deleted: @@ -887,7 +898,7 @@ func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Contex return worker.StatusAllOK } - if !clusterutil.IsClusterJoined(&cluster.Status) || !util.IsCascadingDeleteEnabled(cluster) { + if !clusterutil.IsClusterJoined(&cluster.Status) || !cascadingdeletion.IsCascadingDeleteEnabled(cluster) { // cascading-delete is not required, remove cascading-delete finalizer immediately err := s.removeClusterFinalizer(ctx, cluster) if err != nil { diff --git a/pkg/controllers/sync/dispatch/checkunmanaged.go b/pkg/controllers/sync/dispatch/checkunmanaged.go index 859c552d..d3dc66c5 100644 --- a/pkg/controllers/sync/dispatch/checkunmanaged.go +++ b/pkg/controllers/sync/dispatch/checkunmanaged.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. 
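The enqueue path above maps a member-cluster object to the name of its governing federated object via naming.GenerateFederatedObjectName(sourceName, ftcName). That function's body is not part of this patch; the sketch below is a purely hypothetical illustration of such a mapping (join the two names, hash-truncate to respect a length cap), not the real algorithm, and the 253-character cap is an assumption based on the usual Kubernetes name limit.

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// Assumed cap; MaxFederatedObjectNameLength in pkg/controllers/common is the
// authoritative value.
const maxFederatedObjectNameLength = 253

// generateFederatedObjectName is a HYPOTHETICAL illustration of deriving a
// federated object name from a source object name and its FTC name. The real
// algorithm lives in pkg/util/naming and may differ.
func generateFederatedObjectName(sourceName, ftcName string) string {
    name := sourceName + "-" + ftcName
    if len(name) <= maxFederatedObjectNameLength {
        return name
    }
    // Truncate and append a stable hash suffix so long names stay unique.
    sum := sha256.Sum256([]byte(name))
    suffix := "-" + hex.EncodeToString(sum[:])[:10]
    return name[:maxFederatedObjectNameLength-len(suffix)] + suffix
}

func main() {
    // e.g. Deployment "nginx" governed by FTC "deployments.apps"
    fmt.Println(generateFederatedObjectName("nginx", "deployments.apps"))
}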
@@ -33,7 +32,6 @@ import ( "k8s.io/klog/v2" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" ) @@ -76,7 +74,7 @@ func (d *checkUnmanagedDispatcherImpl) CheckRemovedOrUnlabeled(ctx context.Conte errLogMessage := fmt.Sprintf("Failed to %s target obj", op) go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client dynamic.Interface) bool { keyedLogger := klog.FromContext(ctx).WithValues("cluster-name", clusterName) - targetName := d.targetNameForCluster(clusterName) + targetName := d.targetName keyedLogger.V(2).Info("Checking for deletion of resource or removal of managed label from target obj") clusterObj, err := client.Resource(d.targetGVR).Namespace(targetName.Namespace).Get( @@ -108,11 +106,7 @@ func (d *checkUnmanagedDispatcherImpl) wrapOperationError(err error, clusterName err, operation, d.targetGVR.String(), - d.targetNameForCluster(clusterName).String(), + d.targetName.String(), clusterName, ) } - -func (d *checkUnmanagedDispatcherImpl) targetNameForCluster(clusterName string) common.QualifiedName { - return util.QualifiedNameForCluster(clusterName, d.targetName) -} diff --git a/pkg/controllers/sync/dispatch/managed.go b/pkg/controllers/sync/dispatch/managed.go index 9fcf176f..7f7d9887 100644 --- a/pkg/controllers/sync/dispatch/managed.go +++ b/pkg/controllers/sync/dispatch/managed.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. @@ -40,10 +39,10 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/status" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/stats" "github.com/kubewharf/kubeadmiral/pkg/util/adoption" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" + "github.com/kubewharf/kubeadmiral/pkg/util/propagatedversion" ) const IndexRolloutPlans = "federation_placement_rollout" @@ -178,7 +177,7 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string) ctxWithTimeout, obj, metav1.CreateOptions{}, ) if err == nil { - version := util.ObjectVersion(obj) + version := propagatedversion.ObjectVersion(obj) d.recordVersion(clusterName, version) return true } @@ -271,7 +270,7 @@ func (d *managedDispatcherImpl) Update(ctx context.Context, clusterName string, return d.recordOperationError(ctx, fedcorev1a1.VersionRetrievalFailed, clusterName, op, err) } - if !util.ObjectNeedsUpdate(obj, clusterObj, version, d.fedResource.TypeConfig()) { + if !propagatedversion.ObjectNeedsUpdate(obj, clusterObj, version, d.fedResource.TypeConfig()) { // Resource is current, we still record version in dispatcher // so that federated status can be set with cluster resource generation d.recordVersion(clusterName, version) @@ -289,7 +288,7 @@ func (d *managedDispatcherImpl) Update(ctx context.Context, clusterName string, return d.recordOperationError(ctx, fedcorev1a1.UpdateFailed, clusterName, op, err) } d.setResourcesUpdated() - version = util.ObjectVersion(obj) + version = propagatedversion.ObjectVersion(obj) d.recordVersion(clusterName, version) return true }) @@ -334,7 +333,7 @@ func (d *managedDispatcherImpl) recordOperationError( } func (d *managedDispatcherImpl) recordError(ctx context.Context, clusterName, operation string, err error) { - targetName := d.unmanagedDispatcher.targetNameForCluster(clusterName) + targetName := 
d.unmanagedDispatcher.targetName args := []interface{}{operation, d.fedResource.TargetGVR().String(), targetName, clusterName} eventType := fmt.Sprintf("%sInClusterFailed", strings.Replace(strings.Title(operation), " ", "", -1)) eventErr := errors.Wrapf(err, "Failed to "+eventTemplate, args...) @@ -349,7 +348,7 @@ func (d *managedDispatcherImpl) recordError(ctx context.Context, clusterName, op } func (d *managedDispatcherImpl) recordEvent(clusterName, operation, operationContinuous string) { - targetName := d.unmanagedDispatcher.targetNameForCluster(clusterName) + targetName := d.unmanagedDispatcher.targetName args := []interface{}{operationContinuous, d.fedResource.TargetGVR().String(), targetName, clusterName} eventType := fmt.Sprintf("%sInCluster", strings.Replace(strings.Title(operation), " ", "", -1)) d.fedResource.RecordEvent(eventType, eventTemplate, args...) @@ -386,7 +385,7 @@ func (d *managedDispatcherImpl) CollectedStatus() status.CollectedPropagationSta } return status.CollectedPropagationStatus{ StatusMap: statusMap, - GenerationMap: util.ConvertVersionMapToGenerationMap(d.versionMap), + GenerationMap: propagatedversion.ConvertVersionMapToGenerationMap(d.versionMap), ResourcesUpdated: d.resourcesUpdated, } } diff --git a/pkg/controllers/sync/dispatch/operation.go b/pkg/controllers/sync/dispatch/operation.go index 42a1174a..4d140e7e 100644 --- a/pkg/controllers/sync/dispatch/operation.go +++ b/pkg/controllers/sync/dispatch/operation.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/dispatch/retain.go b/pkg/controllers/sync/dispatch/retain.go index 83b95dee..a7391a77 100644 --- a/pkg/controllers/sync/dispatch/retain.go +++ b/pkg/controllers/sync/dispatch/retain.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. 
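propagatedversion.ObjectVersion and ConvertVersionMapToGenerationMap (used above) operate on the propagated-version string format this codebase inherited from KubeFed: a generation-derived form for objects whose generation tracks spec changes, with resourceVersion as the fallback. The sketch below reconstructs that scheme for intuition only; the exact prefixes are an assumption based on the generationPrefix referenced later in this series.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// Assumed to match the generationPrefix used by pkg/util/propagatedversion.
const generationPrefix = "gen:"

// versionToGeneration extracts the generation from a propagated-version
// string, returning 0 when the version was derived from resourceVersion
// and therefore carries no generation information.
func versionToGeneration(version string) int64 {
    if !strings.HasPrefix(version, generationPrefix) {
        return 0
    }
    gen, err := strconv.ParseInt(strings.TrimPrefix(version, generationPrefix), 10, 64)
    if err != nil {
        return 0
    }
    return gen
}

func main() {
    fmt.Println(versionToGeneration("gen:7")) // 7
    fmt.Println(versionToGeneration("12345")) // 0 (resourceVersion-based)
}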
@@ -33,8 +32,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" + utilunstructured "github.com/kubewharf/kubeadmiral/pkg/util/unstructured" ) const ( @@ -60,32 +58,32 @@ func RetainOrMergeClusterFields( mergeAnnotations(desiredObj, clusterObj) mergeLabels(desiredObj, clusterObj) - switch { - case schemautil.IsServiceGvk(targetGvk): + switch targetGvk { + case common.ServiceGVK: if err := retainServiceFields(desiredObj, clusterObj); err != nil { return err } - case schemautil.IsServiceAccountGvk(targetGvk): + case common.ServiceAccountGVK: if err := retainServiceAccountFields(desiredObj, clusterObj); err != nil { return err } - case schemautil.IsJobGvk(targetGvk): + case common.JobGVK: if err := retainJobFields(desiredObj, clusterObj); err != nil { return err } - case schemautil.IsPersistentVolumeGvk(targetGvk): + case common.PersistentVolumeGVK: if err := retainPersistentVolumeFields(desiredObj, clusterObj); err != nil { return err } - case schemautil.IsPersistentVolumeClaimGvk(targetGvk): + case common.PersistentVolumeClaimGVK: if err := retainPersistentVolumeClaimFields(desiredObj, clusterObj); err != nil { return err } - case schemautil.IsPodGvk(targetGvk): + case common.PodGVK: if err := retainPodFields(desiredObj, clusterObj); err != nil { return err } - case targetGvk == schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Workflow"}: + case schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Workflow"}: // TODO: this is a temporary hack to support Argo Workflow. // We should come up with an extensible framework to support CRDs in the future. if err := retainArgoWorkflow(desiredObj, clusterObj); err != nil { diff --git a/pkg/controllers/sync/dispatch/unmanaged.go b/pkg/controllers/sync/dispatch/unmanaged.go index 683dd493..b291838e 100644 --- a/pkg/controllers/sync/dispatch/unmanaged.go +++ b/pkg/controllers/sync/dispatch/unmanaged.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. 
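The per-kind retention helpers dispatched to above exist because some server-populated fields, such as a Service's allocated clusterIP, must be copied from the live cluster object into the desired object before an update, or member apiservers will reject or revert the write. Below is a compilable sketch of the idea for one field; the real retainServiceFields likely covers more (for example node ports).

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// retainServiceClusterIP copies spec.clusterIP from the live cluster object
// into the desired object. This is a sketch of the retention idea only, not
// kubeadmiral's exact implementation.
func retainServiceClusterIP(desiredObj, clusterObj *unstructured.Unstructured) error {
    clusterIP, found, err := unstructured.NestedString(clusterObj.Object, "spec", "clusterIP")
    if err != nil {
        return err
    }
    if !found || clusterIP == "" {
        return nil // nothing allocated yet, nothing to retain
    }
    return unstructured.SetNestedField(desiredObj.Object, clusterIP, "spec", "clusterIP")
}

func main() {
    desired := &unstructured.Unstructured{Object: map[string]interface{}{
        "spec": map[string]interface{}{},
    }}
    cluster := &unstructured.Unstructured{Object: map[string]interface{}{
        "spec": map[string]interface{}{"clusterIP": "10.0.0.42"},
    }}
    if err := retainServiceClusterIP(desired, cluster); err != nil {
        panic(err)
    }
    fmt.Println(desired.Object["spec"]) // map[clusterIP:10.0.0.42]
}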
@@ -35,8 +34,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" + "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" ) @@ -96,7 +94,7 @@ func (d *unmanagedDispatcherImpl) Delete(ctx context.Context, clusterName string const opContinuous = "Deleting" go d.dispatcher.clusterOperation(ctx, clusterName, op, func(client dynamic.Interface) bool { keyedLogger := klog.FromContext(ctx).WithValues("cluster-name", clusterName) - targetName := d.targetNameForCluster(clusterName) + targetName := d.targetName keyedLogger.V(1).Info("Deleting target object in cluster") if d.recorder != nil { d.recorder.recordEvent(clusterName, op, opContinuous) @@ -216,15 +214,11 @@ func (d *unmanagedDispatcherImpl) wrapOperationError(err error, clusterName, ope err, operation, d.targetGVR.String(), - d.targetNameForCluster(clusterName).String(), + d.targetName.String(), clusterName, ) } -func (d *unmanagedDispatcherImpl) targetNameForCluster(clusterName string) common.QualifiedName { - return util.QualifiedNameForCluster(clusterName, d.targetName) -} - func wrapOperationError(err error, operation, targetGVR, targetName, clusterName string) error { return errors.Wrapf(err, "Failed to "+eventTemplate, operation, targetGVR, targetName, clusterName) } diff --git a/pkg/controllers/sync/placement.go b/pkg/controllers/sync/placement.go index a60889f1..c550873d 100644 --- a/pkg/controllers/sync/placement.go +++ b/pkg/controllers/sync/placement.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 4bca9fe4..4cde6423 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. @@ -39,10 +38,9 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/dispatch" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/version" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/finalizers" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" + "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" + overridesutil "github.com/kubewharf/kubeadmiral/pkg/util/overrides" ) // FederatedResource encapsulates the behavior of a logical federated @@ -69,7 +67,7 @@ type federatedResource struct { federatedObject fedcorev1a1.GenericFederatedObject template *unstructured.Unstructured versionManager *version.VersionManager - overridesMap util.OverridesMap + overridesMap overridesutil.OverridesMap versionMap map[string]string eventRecorder record.EventRecorder } @@ -154,7 +152,8 @@ func (r *federatedResource) ComputePlacement(clusters []*fedcorev1a1.FederatedCl func (r *federatedResource) ObjectForCluster(clusterName string) (*unstructured.Unstructured, error) { obj := r.template.DeepCopy() - if schemautil.IsJobGvk(r.TargetGVK()) { + switch r.TargetGVK() { + case common.JobGVK: if err := dropJobFields(obj); err != nil { return nil, err } @@ -162,15 +161,11 @@ func (r *federatedResource) ObjectForCluster(clusterName string) (*unstructured. 
if err := addRetainObjectFinalizer(obj); err != nil { return nil, err } - } - - if schemautil.IsServiceGvk(r.TargetGVK()) { + case common.ServiceGVK: if err := dropServiceFields(obj); err != nil { return nil, err } - } - - if schemautil.IsPodGvk(r.TargetGVK()) { + case common.PodGVK: if err := dropPodFields(obj); err != nil { return nil, err } @@ -234,7 +229,7 @@ func (r *federatedResource) ApplyOverrides( return err } if overrides != nil { - if err := util.ApplyJsonPatch(obj, overrides); err != nil { + if err := overridesutil.ApplyJsonPatch(obj, overrides); err != nil { return err } } @@ -291,7 +286,7 @@ func (r *federatedResource) overridesForCluster(clusterName string) (fedcorev1a1 return lhs < rhs }) - r.overridesMap = make(util.OverridesMap) + r.overridesMap = make(overridesutil.OverridesMap) // Merge overrides in the specified order for _, controllerOverride := range overrides { diff --git a/pkg/controllers/sync/status/status.go b/pkg/controllers/sync/status/status.go index 5613fb89..393c078a 100644 --- a/pkg/controllers/sync/status/status.go +++ b/pkg/controllers/sync/status/status.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2019 The Kubernetes Authors. diff --git a/pkg/controllers/sync/version/adapter.go b/pkg/controllers/sync/version/adapter.go index 30d1992b..013d343a 100644 --- a/pkg/controllers/sync/version/adapter.go +++ b/pkg/controllers/sync/version/adapter.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/sync/version/cluster.go b/pkg/controllers/sync/version/cluster.go index 7ce77573..dd37cf70 100644 --- a/pkg/controllers/sync/version/cluster.go +++ b/pkg/controllers/sync/version/cluster.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index ab32055a..4ebc92e6 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. @@ -38,7 +37,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + "github.com/kubewharf/kubeadmiral/pkg/util/propagatedversion" ) // VersionedResource defines the methods a federated resource must @@ -195,7 +194,7 @@ func (m *VersionManager) Update( ClusterVersions: clusterVersions, } - if oldStatus != nil && util.PropagatedVersionStatusEquivalent(oldStatus, status) { + if oldStatus != nil && propagatedversion.PropagatedVersionStatusEquivalent(oldStatus, status) { m.Unlock() m.logger.WithValues("version-qualified-name", qualifiedName). V(4).Info("No update necessary") @@ -439,6 +438,6 @@ func VersionMapToClusterVersions(versionMap map[string]string) []fedcorev1a1.Clu Version: version, }) } - util.SortClusterVersions(clusterVersions) + propagatedversion.SortClusterVersions(clusterVersions) return clusterVersions } diff --git a/pkg/controllers/sync/version/namespaced.go b/pkg/controllers/sync/version/namespaced.go index a602cf56..2260b947 100644 --- a/pkg/controllers/sync/version/namespaced.go +++ b/pkg/controllers/sync/version/namespaced.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. 
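overridesutil.ApplyJsonPatch, called from ApplyOverrides above, wraps evanphx/json-patch: the override entries are marshalled into an RFC 6902 patch and applied to the object's JSON serialization. The self-contained sketch below exercises those mechanics with the library directly; the Deployment and patch document are example data.

package main

import (
    "fmt"

    jsonpatch "github.com/evanphx/json-patch"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
    obj := &unstructured.Unstructured{Object: map[string]interface{}{
        "apiVersion": "apps/v1",
        "kind":       "Deployment",
        "metadata":   map[string]interface{}{"name": "nginx", "namespace": "default"},
        "spec":       map[string]interface{}{"replicas": int64(1)},
    }}

    // An RFC 6902 patch equivalent to an override whose op defaulted to "replace".
    patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "replace", "path": "/spec/replicas", "value": 5}]`))
    if err != nil {
        panic(err)
    }

    raw, err := obj.MarshalJSON()
    if err != nil {
        panic(err)
    }
    patched, err := patch.Apply(raw)
    if err != nil {
        panic(err)
    }
    if err := obj.UnmarshalJSON(patched); err != nil {
        panic(err)
    }
    fmt.Println(obj.Object["spec"]) // map[replicas:5]
}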
diff --git a/pkg/controllers/util/schema/gvk.go b/pkg/controllers/util/schema/gvk.go deleted file mode 100644 index 9abf7b25..00000000 --- a/pkg/controllers/util/schema/gvk.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build exclude -/* -Copyright 2023 The KubeAdmiral Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package schema - -import ( - batchv1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" -) - -func IsServiceGvk(gvk schema.GroupVersionKind) bool { - return gvk.Group == "" && gvk.Kind == common.ServiceKind -} - -func IsServiceAccountGvk(gvk schema.GroupVersionKind) bool { - return gvk.Group == "" && gvk.Kind == common.ServiceAccountKind -} - -func IsJobGvk(gvk schema.GroupVersionKind) bool { - return gvk.Group == batchv1.GroupName && gvk.Kind == common.JobKind -} - -func IsPersistentVolumeGvk(gvk schema.GroupVersionKind) bool { - return gvk.Group == "" && gvk.Kind == common.PersistentVolumeKind -} - -func IsPersistentVolumeClaimGvk(gvk schema.GroupVersionKind) bool { - return gvk.Group == "" && gvk.Kind == common.PersistentVolumeClaimKind -} - -func IsPodGvk(gvk schema.GroupVersionKind) bool { - return gvk.Group == "" && gvk.Kind == common.PodKind -} diff --git a/pkg/controllers/util/cascadingdeleteannotation.go b/pkg/util/cascadingdeletion/annotation.go similarity index 97% rename from pkg/controllers/util/cascadingdeleteannotation.go rename to pkg/util/cascadingdeletion/annotation.go index d91c371f..6ba0e6fa 100644 --- a/pkg/controllers/util/cascadingdeleteannotation.go +++ b/pkg/util/cascadingdeletion/annotation.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -15,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package util +package cascadingdeletion import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" diff --git a/pkg/util/eventsink/eventsink.go b/pkg/util/eventsink/eventsink.go index 759fe77c..21af7a0d 100644 --- a/pkg/util/eventsink/eventsink.go +++ b/pkg/util/eventsink/eventsink.go @@ -20,15 +20,14 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/client/generic/scheme" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) type EventRecorderMux struct { @@ -67,27 +66,28 @@ func (mux *EventRecorderMux) WithTransform( func (mux *EventRecorderMux) WithDefederateTransformer(recorder record.EventRecorder) *EventRecorderMux { return mux.WithTransform(recorder, func(obj runtime.Object) runtime.Object { - accessor, err := meta.Accessor(obj) - if err != nil { - klog.Errorf("Could not construct reference to: '%#v' due to: '%v'.", obj, err) + fedObject, ok := obj.(fedcorev1a1.GenericFederatedObject) + if !ok { return nil } - if _, ok := accessor.GetAnnotations()[common.FederatedObjectAnnotation]; !ok { - // not a federated object + templateMeta, err := fedObject.GetSpec().GetTemplateMetadata() + if err != nil { return nil } - for _, owner := range accessor.GetOwnerReferences() { - if !(owner.Controller != nil && *owner.Controller) { + for _, owner := range fedObject.GetOwnerReferences() { + ownerIsSourceObject := owner.Controller != nil && *owner.Controller && + owner.APIVersion == templateMeta.APIVersion && owner.Kind == templateMeta.Kind && + owner.Name == templateMeta.Name + if !ownerIsSourceObject { continue } - namespace := accessor.GetNamespace() return &corev1.ObjectReference{ APIVersion: owner.APIVersion, Kind: owner.Kind, - Namespace: namespace, + Namespace: fedObject.GetNamespace(), Name: owner.Name, UID: owner.UID, ResourceVersion: "", // this value should not be used diff --git a/pkg/util/eventsink/eventsink_test.go b/pkg/util/eventsink/eventsink_test.go index 624103ab..4aa991bd 100644 --- a/pkg/util/eventsink/eventsink_test.go +++ b/pkg/util/eventsink/eventsink_test.go @@ -25,10 +25,11 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sjson "k8s.io/apimachinery/pkg/util/json" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/client/generic/scheme" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -65,18 +66,17 @@ func (list *eventList) freeze() []*corev1.Event { func TestDefederateTransformer(t *testing.T) { t.Run("should emit event for source object", func(t *testing.T) { sourceObj := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: common.DeploymentKind, + }, ObjectMeta: metav1.ObjectMeta{ Name: "test-dp", UID: "testuid", }, } - testObj := fedtypesv1a1.FederatedDeployment{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - common.FederatedObjectAnnotation: "true", - }, - }, - } + testObj := fedcorev1a1.FederatedObject{} + testObj.SetOwnerReferences([]metav1.OwnerReference{ { APIVersion: 
sourceObj.APIVersion, @@ -87,6 +87,14 @@ func TestDefederateTransformer(t *testing.T) { }, }) + assert := assert.New(t) + if data, err := k8sjson.Marshal(&sourceObj); err != nil { + assert.NoError(err) + return + } else { + testObj.Spec.Template.Raw = data + } + events := &eventList{} testBroadcaster := record.NewBroadcasterForTests(0) testBroadcaster.StartEventWatcher(events.push) @@ -98,7 +106,6 @@ func TestDefederateTransformer(t *testing.T) { mux.Event(&testObj, corev1.EventTypeWarning, "testReason", "testMessage") testBroadcaster.Shutdown() - assert := assert.New(t) assert.Eventually( func() bool { return events.len() == 1 }, time.Second, @@ -114,14 +121,26 @@ func TestDefederateTransformer(t *testing.T) { assert.Equal(sourceObj.UID, eventObj.UID) assert.Equal(sourceObj.APIVersion, eventObj.APIVersion) }) - t.Run("should not emit event if object is not federated", func(t *testing.T) { - testObj := fedtypesv1a1.FederatedDeployment{ + t.Run("should not emit event if source owner reference does not exist", func(t *testing.T) { + sourceObj := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: common.DeploymentKind, + }, ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - common.FederatedObjectAnnotation: "true", - }, + Name: "test-dp", + UID: "testuid", }, } + testObj := fedcorev1a1.FederatedObject{} + + assert := assert.New(t) + if data, err := k8sjson.Marshal(&sourceObj); err != nil { + assert.NoError(err) + return + } else { + testObj.Spec.Template.Raw = data + } events := &eventList{} testBroadcaster := record.NewBroadcasterForTests(0) @@ -134,20 +153,41 @@ func TestDefederateTransformer(t *testing.T) { mux.Event(&testObj, corev1.EventTypeWarning, "testReason", "testMessage") testBroadcaster.Shutdown() - assert := assert.New(t) assert.Never( func() bool { return events.len() > 0 }, time.Second, 100*time.Millisecond, ) }) - t.Run("should not emit event if source owner reference does not exist", func(t *testing.T) { - testObj := fedtypesv1a1.FederatedDeployment{ + t.Run("should not emit event if owner reference is not the source object", func(t *testing.T) { + sourceObj := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: common.DeploymentKind, + }, ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - common.FederatedObjectAnnotation: "true", - }, + Name: "test-dp", + UID: "testuid", + }, + } + testObj := fedcorev1a1.FederatedObject{} + + testObj.SetOwnerReferences([]metav1.OwnerReference{ + { + APIVersion: "v1", + Kind: "Secret", + Name: "test-secret", + UID: "testuid", + Controller: pointer.Bool(true), }, + }) + + assert := assert.New(t) + if data, err := k8sjson.Marshal(&sourceObj); err != nil { + assert.NoError(err) + return + } else { + testObj.Spec.Template.Raw = data } events := &eventList{} @@ -161,7 +201,6 @@ func TestDefederateTransformer(t *testing.T) { mux.Event(&testObj, corev1.EventTypeWarning, "testReason", "testMessage") testBroadcaster.Shutdown() - assert := assert.New(t) assert.Never( func() bool { return events.len() > 0 }, time.Second, diff --git a/pkg/controllers/util/meta.go b/pkg/util/meta/meta.go similarity index 99% rename from pkg/controllers/util/meta.go rename to pkg/util/meta/meta.go index eccfa583..3628a220 100644 --- a/pkg/controllers/util/meta.go +++ b/pkg/util/meta/meta.go @@ -18,7 +18,7 @@ This file may have been modified by The KubeAdmiral Authors are Copyright 2023 The KubeAdmiral Authors. 
*/ -package util +package meta import ( "encoding/json" diff --git a/pkg/controllers/util/meta_test.go b/pkg/util/meta/meta_test.go similarity index 99% rename from pkg/controllers/util/meta_test.go rename to pkg/util/meta/meta_test.go index a42e7f1b..2eedc6c0 100644 --- a/pkg/controllers/util/meta_test.go +++ b/pkg/util/meta/meta_test.go @@ -18,7 +18,7 @@ This file may have been modified by The KubeAdmiral Authors are Copyright 2023 The KubeAdmiral Authors. */ -package util +package meta import ( "testing" diff --git a/pkg/util/overrides/overrides.go b/pkg/util/overrides/overrides.go new file mode 100644 index 00000000..1b992793 --- /dev/null +++ b/pkg/util/overrides/overrides.go @@ -0,0 +1,84 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This file may have been modified by The KubeAdmiral Authors +("KubeAdmiral Modifications"). All KubeAdmiral Modifications +are Copyright 2023 The KubeAdmiral Authors. +*/ + +package overrides + +import ( + "encoding/json" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" +) + +// Namespace and name may not be overridden since these fields are the +// primary mechanism of association between a federated resource in +// the host cluster and the target resources in the member clusters. +// +// Kind should always be sourced from the FTC and not vary across +// member clusters. +// +// apiVersion can be overridden to support managing resources like +// Ingress which can exist in different groups at different +// versions. Users will need to take care not to abuse this +// capability. +var invalidPaths = sets.NewString( + "/metadata/namespace", + "/metadata/name", + "/metadata/generateName", + "/kind", +) + +// Mapping of clusterName to overrides for the cluster +type OverridesMap map[string]fedcorev1a1.OverridePatches + +// ApplyJsonPatch applies the override onto the given unstructured object.
+func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedcorev1a1.OverridePatches) error { + // TODO: Do the defaulting of "op" field to "replace" in API defaulting + for i, overrideItem := range overrides { + if overrideItem.Op == "" { + overrides[i].Op = "replace" + } + } + jsonPatchBytes, err := json.Marshal(overrides) + if err != nil { + return err + } + + patch, err := jsonpatch.DecodePatch(jsonPatchBytes) + if err != nil { + return err + } + + objectJSONBytes, err := obj.MarshalJSON() + if err != nil { + return err + } + + patchedObjectJSONBytes, err := patch.Apply(objectJSONBytes) + if err != nil { + return err + } + + err = obj.UnmarshalJSON(patchedObjectJSONBytes) + return err +} diff --git a/pkg/controllers/util/propagatedversion.go b/pkg/util/propagatedversion/propagatedversion.go similarity index 99% rename from pkg/controllers/util/propagatedversion.go rename to pkg/util/propagatedversion/propagatedversion.go index f1f446df..06367c10 100644 --- a/pkg/controllers/util/propagatedversion.go +++ b/pkg/util/propagatedversion/propagatedversion.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2018 The Kubernetes Authors. @@ -19,7 +18,7 @@ This file may have been modified by The KubeAdmiral Authors are Copyright 2023 The KubeAdmiral Authors. */ -package util +package propagatedversion import ( "fmt" From 98cee80400c5e398edf5421e8635a36a0195f1cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 20 Jul 2023 11:05:20 +0000 Subject: [PATCH 121/173] fix: make federate controller compilable --- Makefile | 2 +- pkg/controllers/common/constants.go | 2 ++ pkg/controllers/federate/util.go | 12 +++++------- pkg/controllers/util/federatedstatus.go | 3 +-- pkg/util/propagatedversion/propagatedversion.go | 3 ++- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index ddfc868b..86e307ac 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ all: build # make build BUILD_PLATFORMS=linux/amd64,darwin/amd64 .PHONY: build build: - BUILD_FLAGS=$(BUILD_FLAGS) TARGET_NAME=$(TARGET_NAME) GOPROXY=$(GOPROXY) bash hack/make-rules/build.sh + BUILD_FLAGS="$(BUILD_FLAGS)" TARGET_NAME="$(TARGET_NAME)" GOPROXY="$(GOPROXY)" bash hack/make-rules/build.sh # Start a local kubeadmiral cluster for developers. # diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index 12d280d8..68d38c5f 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -110,6 +110,8 @@ const ( // TemplateGeneratorMergePatchAnnotation indicates the merge patch document capable of converting // the source object to the template object. 
TemplateGeneratorMergePatchAnnotation = FederateControllerPrefix + "template-generator-merge-patch" + + LatestReplicasetDigestsAnnotation = DefaultPrefix + "latest-replicaset-digests" ) // PropagatedAnnotationKeys and PropagatedLabelKeys are used to store the keys of annotations and labels that are present diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index bdbdd377..cab26805 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -35,9 +35,10 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + "github.com/kubewharf/kubeadmiral/pkg/util/adoption" annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/orphaning" "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" ) @@ -251,8 +252,8 @@ var ( scheduler.SchedulingModeAnnotation, scheduler.StickyClusterAnnotation, nsautoprop.NoAutoPropagationAnnotation, - util.OrphanManagedResourcesAnnotation, - util.ConflictResolutionAnnotation, + orphaning.OrphanManagedResourcesAnnotation, + adoption.ConflictResolutionAnnotation, scheduler.TolerationsAnnotations, scheduler.PlacementsAnnotations, scheduler.ClusterSelectorAnnotations, @@ -263,12 +264,9 @@ var ( common.FollowersAnnotation, ) - // TODO: Do we need to specify the internal annotations here? // List of annotations that should be ignored on the source object ignoredAnnotationSet = sets.New( - util.ConflictResolutionInternalAnnotation, - util.OrphanManagedResourcesInternalAnnotation, - common.EnableFollowerSchedulingAnnotation, + common.LatestReplicasetDigestsAnnotation, ) federatedLabelSet = sets.New[string]( diff --git a/pkg/controllers/util/federatedstatus.go b/pkg/controllers/util/federatedstatus.go index 87d1a911..dff29dfc 100644 --- a/pkg/controllers/util/federatedstatus.go +++ b/pkg/controllers/util/federatedstatus.go @@ -41,8 +41,7 @@ const ( const ( // annotations for federatedDeploymentStatus - LatestReplicasetDigestsAnnotation = common.DefaultPrefix + "latest-replicaset-digests" - AggregatedUpdatedReplicas = common.DefaultPrefix + "aggregated-updated-replicas" + AggregatedUpdatedReplicas = common.DefaultPrefix + "aggregated-updated-replicas" ) // FederatedResource is a generic representation of a federated type diff --git a/pkg/util/propagatedversion/propagatedversion.go b/pkg/util/propagatedversion/propagatedversion.go index 06367c10..2cffbb0d 100644 --- a/pkg/util/propagatedversion/propagatedversion.go +++ b/pkg/util/propagatedversion/propagatedversion.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/util/meta" ) const ( @@ -64,7 +65,7 @@ func ObjectNeedsUpdate( // If versions match and the version is sourced from the // generation field, a further check of metadata equivalency is // required. 
- return strings.HasPrefix(targetVersion, generationPrefix) && !ObjectMetaObjEquivalent(desiredObj, clusterObj) + return strings.HasPrefix(targetVersion, generationPrefix) && !meta.ObjectMetaObjEquivalent(desiredObj, clusterObj) } // SortClusterVersions ASCII sorts the given cluster versions slice From 45f4c46a161c48944a3962da2a3536c9e5545d85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 20 Jul 2023 11:23:45 +0000 Subject: [PATCH 122/173] fix: handle clusters marked for deletion during downtime --- pkg/controllers/sync/accessor.go | 5 ++--- pkg/controllers/sync/controller.go | 7 +++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index d98c53d8..be344120 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" @@ -146,8 +145,8 @@ func (a *resourceAccessor) FederatedResource( } federatedObject = federatedObject.DeepCopyGenericFederatedObject() - template := &unstructured.Unstructured{} - if err := template.UnmarshalJSON(federatedObject.GetSpec().Template.Raw); err != nil { + template, err := federatedObject.GetSpec().GetTemplateAsUnstructured() + if err != nil { return nil, fmt.Errorf("failed to unmarshal template: %w", err) } diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 73b5e6c7..2907289a 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -210,8 +210,7 @@ func NewSyncController( &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { // Enqueue cluster when it's marked for deletion to ensure cascading deletion - return oldCluster != nil && newCluster != nil && - oldCluster.GetDeletionTimestamp().IsZero() && !newCluster.GetDeletionTimestamp().IsZero() + return !newCluster.GetDeletionTimestamp().IsZero() }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { s.clusterCascadingDeletionWorker.Enqueue(common.NewQualifiedName(cluster)) @@ -219,7 +218,7 @@ func NewSyncController( }, &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { - // Reconcile all federated objects when cluster becomes available + // Reconcile all federated objects when cluster becomes ready return oldCluster != nil && newCluster != nil && !clusterutil.IsClusterReady(&oldCluster.Status) && clusterutil.IsClusterReady(&newCluster.Status) }, @@ -229,7 +228,7 @@ func NewSyncController( }, &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { - // Reconcile all federated objects when cluster becomes unavailable + // Reconcile all federated objects when cluster becomes unready return oldCluster != nil && newCluster != nil && clusterutil.IsClusterReady(&oldCluster.Status) && !clusterutil.IsClusterReady(&newCluster.Status) }, From 1e4f10be11cae7917a674ed5adcccd72ee67c07b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 20 Jul 2023 11:24:26 +0000 Subject: [PATCH 123/173] fix: list PropagatedVersions with rv=0 --- pkg/controllers/sync/version/manager.go | 5 ++++- 1 file changed, 4 
insertions(+), 1 deletion(-) diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index 4ebc92e6..23f6449f 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -233,7 +233,10 @@ func (m *VersionManager) list(ctx context.Context) (runtimeclient.ObjectList, bo var versionList runtimeclient.ObjectList err := wait.PollImmediateInfiniteWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) { var err error - versionList, err = m.adapter.List(ctx, m.client, m.namespace, metav1.ListOptions{}) + versionList, err = m.adapter.List( + ctx, m.client, m.namespace, metav1.ListOptions{ + ResourceVersion: "0", + }) if err != nil { m.logger.Error(err, "Failed to list propagated versions") // Do not return the error to allow the operation to be retried. From b446e2bd4a037a9ca4274b18384d79084b2d9429 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 21 Jul 2023 02:50:00 +0000 Subject: [PATCH 124/173] chore: remove create-crd-for-ftc flag --- pkg/controllers/context/context.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/context/context.go b/pkg/controllers/context/context.go index 90d28f89..772bdfbb 100644 --- a/pkg/controllers/context/context.go +++ b/pkg/controllers/context/context.go @@ -77,8 +77,7 @@ func (c *Context) StartFactories(ctx context.Context) { } type ComponentConfig struct { - NSAutoPropExcludeRegexp *regexp.Regexp - FederatedTypeConfigCreateCRDsForFTCs bool - ClusterJoinTimeout time.Duration - MemberObjectEnqueueDelay time.Duration + NSAutoPropExcludeRegexp *regexp.Regexp + ClusterJoinTimeout time.Duration + MemberObjectEnqueueDelay time.Duration } From a4a0a99efedac963086f3427f46eb8cd2c1f4567 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 21 Jul 2023 04:07:39 +0000 Subject: [PATCH 125/173] fix(sync): wait for fedObject informer sync --- pkg/controllers/sync/accessor.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index be344120..2c57250f 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -116,6 +116,14 @@ func (a *resourceAccessor) Run(ctx context.Context) { } func (a *resourceAccessor) HasSynced() bool { + if !a.fedObjectInformer.Informer().HasSynced() { + a.logger.V(3).Info("FederatedObject informer not synced") + return false + } + if !a.clusterFedObjectInformer.Informer().HasSynced() { + a.logger.V(3).Info("ClusterFederatedObject informer not synced") + return false + } if !a.versionManager.HasSynced() { a.logger.V(3).Info("Version manager not synced") return false From fdfffa827ac7a53a891a8b661427d32f5caa42ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 21 Jul 2023 04:08:02 +0000 Subject: [PATCH 126/173] fix(sync): include target name in start reconcile log --- pkg/controllers/sync/controller.go | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 2907289a..44f84594 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -268,9 +268,12 @@ func (s *SyncController) Run(ctx context.Context) { }() if !cache.WaitForNamedCacheSync(SyncControllerName, ctx.Done(), s.HasSynced) { + s.logger.Error(nil, "Timed out waiting 
for cache sync") return } + s.logger.Info("Caches are synced") + s.worker.Run(ctx) s.clusterCascadingDeletionWorker.Run(ctx) @@ -284,6 +287,10 @@ func (s *SyncController) Run(ctx context.Context) { // Check whether all data stores are in sync. False is returned if any of the informer/stores is not yet // synced with the corresponding api server. func (s *SyncController) HasSynced() bool { + if !s.ftcManager.HasSynced() { + s.logger.V(3).Info("FederatedTypeConfigManager not synced") + return false + } if !s.fedInformerManager.HasSynced() { s.logger.V(3).Info("FederatedInformerManager not synced") return false @@ -318,16 +325,7 @@ func (s *SyncController) enqueueAllObjects() { } func (s *SyncController) reconcile(ctx context.Context, federatedName common.QualifiedName) (status worker.Result) { - keyedLogger := s.logger.WithValues("federated-name", federatedName.String()) - ctx = klog.NewContext(ctx, keyedLogger) - - s.metrics.Rate("sync.throughput", 1) - keyedLogger.V(3).Info("Starting to reconcile") - startTime := time.Now() - defer func() { - s.metrics.Duration("sync.latency", startTime) - keyedLogger.WithValues("duration", time.Since(startTime), "status", status).V(3).Info("Finished reconciling") - }() + ctx, keyedLogger := logging.InjectLogger(ctx, s.logger.WithValues("federated-name", federatedName.String())) fedResource, err := s.fedAccessor.FederatedResource(federatedName) if err != nil { @@ -344,6 +342,14 @@ func (s *SyncController) reconcile(ctx context.Context, federatedName common.Qua "gvk", fedResource.TargetGVK().String(), ) + s.metrics.Rate("sync.throughput", 1) + keyedLogger.V(3).Info("Starting to reconcile") + startTime := time.Now() + defer func() { + s.metrics.Duration("sync.latency", startTime) + keyedLogger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconciling") + }() + if fedResource.Object().GetDeletionTimestamp() != nil { return s.ensureDeletion(ctx, fedResource) } From 36c4fe66a0a591e2f123a97ff2476e677e42e2a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 21 Jul 2023 13:29:27 +0000 Subject: [PATCH 127/173] chore: standardise logging levels in verison manager --- pkg/controllers/sync/version/manager.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index 23f6449f..d081f0c1 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -261,7 +261,7 @@ func (m *VersionManager) load(ctx context.Context, versionList runtimeclient.Obj for _, obj := range objs { select { case <-ctx.Done(): - m.logger.V(4).Info("Halting version manager load due to closed stop channel") + m.logger.Info("Halting version manager load due to closed stop channel") return false default: } @@ -272,7 +272,7 @@ func (m *VersionManager) load(ctx context.Context, versionList runtimeclient.Obj m.Lock() m.hasSynced = true m.Unlock() - m.logger.V(4).Info("Version manager synced") + m.logger.Info("Version manager synced") return true } @@ -317,10 +317,10 @@ func (m *VersionManager) writeVersion(obj runtimeclient.Object, qualifiedName co createdObj := obj.DeepCopyObject().(runtimeclient.Object) setResourceVersion(createdObj, "") - keyedLogger.V(2).Info("Creating resourceVersion") + keyedLogger.V(1).Info("Creating resourceVersion") createdObj, err = m.adapter.Create(context.TODO(), m.client, createdObj, metav1.CreateOptions{}) if 
apierrors.IsAlreadyExists(err) { - keyedLogger.V(3).Info("ResourceVersion was created by another process. Will refresh the resourceVersion and attempt to update") + keyedLogger.V(1).Info("ResourceVersion was created by another process. Will refresh the resourceVersion and attempt to update") refreshVersion = true return false, nil } @@ -344,15 +344,15 @@ func (m *VersionManager) writeVersion(obj runtimeclient.Object, qualifiedName co updatedObj := obj.DeepCopyObject().(runtimeclient.Object) setResourceVersion(updatedObj, resourceVersion) - keyedLogger.V(2).Info("Updating the status") + keyedLogger.V(1).Info("Updating the status") updatedObj, err = m.adapter.UpdateStatus(context.TODO(), m.client, updatedObj, metav1.UpdateOptions{}) if apierrors.IsConflict(err) { - keyedLogger.V(3).Info("ResourceVersion was updated by another process. Will refresh the resourceVersion and retry the update") + keyedLogger.V(1).Info("ResourceVersion was updated by another process. Will refresh the resourceVersion and retry the update") refreshVersion = true return false, nil } if apierrors.IsNotFound(err) { - keyedLogger.V(3).Info("ResourceVersion was deleted by another process. Will clear the resourceVersion and retry the update") + keyedLogger.V(1).Info("ResourceVersion was deleted by another process. Will clear the resourceVersion and retry the update") resourceVersion = "" return false, nil } From 9e26bbe3f33f5c303c4a7d6cf4afa2aa13f48fe8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 21 Jul 2023 14:49:15 +0000 Subject: [PATCH 128/173] fix(sync): nil deref in setFederatedStatus --- pkg/controllers/sync/controller.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 44f84594..4603beb3 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -550,6 +550,8 @@ func (s *SyncController) setFederatedStatus( } obj := fedResource.Object() + objNamespace := obj.GetNamespace() + objName := obj.GetName() keyedLogger := klog.FromContext(ctx) // If the underlying resource has changed, attempt to retrieve and @@ -567,7 +569,7 @@ func (s *SyncController) setFederatedStatus( return true, nil } if apierrors.IsConflict(err) { - obj, err = fedobjectadapters.Get(ctx, s.fedClient.CoreV1alpha1(), obj.GetNamespace(), obj.GetName(), metav1.GetOptions{}) + obj, err = fedobjectadapters.Get(ctx, s.fedClient.CoreV1alpha1(), objNamespace, objName, metav1.GetOptions{}) if err != nil { return false, errors.Wrapf(err, "failed to retrieve resource") } From 47a542358a1489ea8380da379d5830389f2d8c7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Fri, 21 Jul 2023 14:49:49 +0000 Subject: [PATCH 129/173] fix(sync): empty GVK for federated object --- pkg/controllers/sync/resource.go | 6 ++++++ pkg/controllers/sync/version/manager.go | 11 +++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 4cde6423..516a16b5 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -25,6 +25,7 @@ import ( "crypto/md5" "encoding/hex" "fmt" + "reflect" "sort" "sync" @@ -88,6 +89,11 @@ func (r *federatedResource) TargetGVR() schema.GroupVersionResource { return r.typeConfig.GetSourceTypeGVR() } +func (r *federatedResource) FederatedGVK() schema.GroupVersionKind { + // NOTE: remember to update this method when we 
switch to a different apiVersion. + return fedcorev1a1.SchemeGroupVersion.WithKind(reflect.TypeOf(r.federatedObject).Elem().Name()) +} + func (r *federatedResource) TypeConfig() *fedcorev1a1.FederatedTypeConfig { return r.typeConfig } diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index d081f0c1..74056c15 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -29,6 +29,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" @@ -47,6 +48,7 @@ type VersionedResource interface { Object() fedcorev1a1.GenericFederatedObject TemplateVersion() (string, error) OverrideVersion() (string, error) + FederatedGVK() schema.GroupVersionKind } type VersionManager struct { @@ -197,12 +199,12 @@ func (m *VersionManager) Update( if oldStatus != nil && propagatedversion.PropagatedVersionStatusEquivalent(oldStatus, status) { m.Unlock() m.logger.WithValues("version-qualified-name", qualifiedName). - V(4).Info("No update necessary") + V(4).Info("No need to update propagated version status") return nil } if obj == nil { - ownerReference := ownerReferenceForFederatedObject(resource.Object()) + ownerReference := ownerReferenceForFederatedObject(resource) obj = m.adapter.NewVersion(qualifiedName, ownerReference, status) m.versions[key] = obj } else { @@ -400,8 +402,9 @@ func setResourceVersion(obj runtimeclient.Object, resourceVersion string) { obj.SetResourceVersion(resourceVersion) } -func ownerReferenceForFederatedObject(obj fedcorev1a1.GenericFederatedObject) metav1.OwnerReference { - gvk := obj.GetObjectKind().GroupVersionKind() +func ownerReferenceForFederatedObject(resource VersionedResource) metav1.OwnerReference { + gvk := resource.FederatedGVK() + obj := resource.Object() return metav1.OwnerReference{ APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, From 4b258c4f533558439afd34929c6dd27556aa57ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Sat, 22 Jul 2023 04:45:39 +0000 Subject: [PATCH 130/173] fix(sync): cluster enqueue conditions --- pkg/controllers/sync/controller.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 4603beb3..6a65b947 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -209,8 +209,8 @@ func NewSyncController( if err := s.fedInformerManager.AddClusterEventHandlers( &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { - // Enqueue cluster when it's marked for deletion to ensure cascading deletion - return !newCluster.GetDeletionTimestamp().IsZero() + // Enqueue cluster when it's added or marked for deletion to ensure cascading deletion + return oldCluster == nil || newCluster != nil && !newCluster.GetDeletionTimestamp().IsZero() }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { s.clusterCascadingDeletionWorker.Enqueue(common.NewQualifiedName(cluster)) @@ -219,8 +219,9 @@ func NewSyncController( &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { // Reconcile all federated objects when cluster becomes ready - return oldCluster != nil && newCluster 
!= nil && - !clusterutil.IsClusterReady(&oldCluster.Status) && clusterutil.IsClusterReady(&newCluster.Status) + newClusterIsReady := newCluster != nil && clusterutil.IsClusterReady(&newCluster.Status) + oldClusterIsUnready := oldCluster == nil || !clusterutil.IsClusterReady(&oldCluster.Status) + return newClusterIsReady && oldClusterIsUnready }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { s.clusterReadinessTransitionQueue.AddAfter(struct{}{}, s.clusterAvailableDelay) @@ -229,8 +230,9 @@ func NewSyncController( &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { // Reconcile all federated objects when cluster becomes unready - return oldCluster != nil && newCluster != nil && - clusterutil.IsClusterReady(&oldCluster.Status) && !clusterutil.IsClusterReady(&newCluster.Status) + oldClusterIsReady := oldCluster != nil && clusterutil.IsClusterReady(&oldCluster.Status) + newClusterIsUnready := newCluster == nil || !clusterutil.IsClusterReady(&newCluster.Status) + return oldClusterIsReady && newClusterIsUnready }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { s.clusterReadinessTransitionQueue.AddAfter(struct{}{}, s.clusterUnavailableDelay) From 636db499f4e02e273e76bf79bdfa7fd4e748f34a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Sat, 22 Jul 2023 04:47:09 +0000 Subject: [PATCH 131/173] fix(sync): error handling for getting from cluster store --- pkg/controllers/sync/controller.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 6a65b947..164b5aa5 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -432,13 +432,13 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat } clusterObjAny, err := lister.Get(fedResource.TargetName().String()) - if err != nil || clusterObjAny == nil { + if err == nil { + clusterObj = clusterObjAny.(*unstructured.Unstructured) + } else if !apierrors.IsNotFound(err) { wrappedErr := fmt.Errorf("failed to get cluster object: %w", err) dispatcher.RecordClusterError(fedcorev1a1.CachedRetrievalFailed, clusterName, wrappedErr) continue } - - clusterObj = clusterObjAny.(*unstructured.Unstructured) } // Resource should not exist in the named cluster From 15161dfd1bbbf43a7a579b6406b68a8b5f438b9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Sat, 22 Jul 2023 04:48:14 +0000 Subject: [PATCH 132/173] fix(sync): error formatting --- pkg/controllers/sync/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 164b5aa5..a02e9b54 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -941,7 +941,7 @@ func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Contex if hasSynced() { objects, err := resourceLister.List(labels.Everything()) if err != nil { - remainingByGVK[gvk] = fmt.Sprintf("Unknown (failed to list from cluster lister: %w)", err) + remainingByGVK[gvk] = fmt.Sprintf("Unknown (failed to list from cluster lister: %v)", err) } else if len(objects) > 0 { remainingByGVK[gvk] = strconv.Itoa(len(objects)) } From 1c34a1cf5722f47da8140f367d9979f0c3f45516 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Sat, 22 Jul 2023 06:25:12 +0000 
Subject: [PATCH 133/173] fix(sync): wait for ftcmanager cache sync --- pkg/controllers/sync/accessor.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index 2c57250f..3e38085e 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -116,6 +116,10 @@ func (a *resourceAccessor) Run(ctx context.Context) { } func (a *resourceAccessor) HasSynced() bool { + if !a.ftcManager.HasSynced() { + a.logger.V(3).Info("FederatedTypeConfigManager not synced") + return false + } if !a.fedObjectInformer.Informer().HasSynced() { a.logger.V(3).Info("FederatedObject informer not synced") return false From f3961ace2b9ffeb6ef45accd1767551ee1d9c0f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Sat, 22 Jul 2023 06:44:20 +0000 Subject: [PATCH 134/173] fix(sync): retry when GetResourceFTC fails for cluster --- pkg/controllers/sync/controller.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index a02e9b54..5a5da21e 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -426,6 +426,7 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat // Find out if this is ok. lister, hasSynced, exists := s.fedInformerManager.GetResourceLister(fedResource.TargetGVK(), clusterName) if !exists || !hasSynced() { + shouldRecheckAfterDispatch = true wrappedErr := fmt.Errorf("cluster cache is not synced") dispatcher.RecordClusterError(fedcorev1a1.CachedRetrievalFailed, clusterName, wrappedErr) continue @@ -933,6 +934,7 @@ func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Contex gvk := ftc.GetSourceTypeGVK().String() resourceLister, hasSynced, exists := s.fedInformerManager.GetResourceLister(ftc.GetSourceTypeGVK(), cluster.Name) if !exists { + remainingByGVK[gvk] = fmt.Sprintf("failed to get resource lister for %s", gvk) continue } From 9927a61c74cfb5077c4689fdd7771bcd7b989823 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Sat, 22 Jul 2023 07:06:14 +0000 Subject: [PATCH 135/173] chore(ftc): remove unused fields --- ...e.kubeadmiral.io_federatedtypeconfigs.yaml | 18 -------- .../extensions_federatedtypeconfig.go | 8 ---- .../v1alpha1/types_federatedtypeconfig.go | 18 -------- .../core/v1alpha1/zz_generated.deepcopy.go | 42 ------------------- 4 files changed, 86 deletions(-) diff --git a/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml b/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml index 14d563ac..923f2ff9 100644 --- a/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml +++ b/config/crds/core.kubeadmiral.io_federatedtypeconfigs.yaml @@ -68,24 +68,6 @@ spec: description: Path to a numeric field that reflects the number of replicas that the object currently has. E.g. `status.replicas` for Deployment and ReplicaSet. type: string type: object - revisionHistory: - description: Configuration for RevisionHistory. If left empty, the RevisionHistory feature will be disabled. - properties: - enabled: - description: Whether or not preserve a RevisionHistory for the federated object during updates. - type: boolean - required: - - enabled - type: object - rolloutPlan: - description: Configuration for RolloutPlan. If left empty, the RolloutPlan feature will be disabled. 
- properties: - enabled: - description: Whether or not to synchronize the rollout process across clusters. - type: boolean - required: - - enabled - type: object sourceType: description: The API resource type to be federated. properties: diff --git a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go index dbecfe7b..486f8766 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go @@ -76,14 +76,6 @@ func (f *FederatedTypeConfig) GetPolicyRcEnabled() bool { return true // TODO: should this be configurable? } -func (f *FederatedTypeConfig) GetRevisionHistoryEnabled() bool { - return f.Spec.RevisionHistory != nil && f.Spec.RevisionHistory.Enabled -} - -func (f *FederatedTypeConfig) GetRolloutPlanEnabled() bool { - return f.Spec.RolloutPlan != nil && f.Spec.RolloutPlan.Enabled -} - func (f *FederatedTypeConfig) GetControllers() [][]string { return f.Spec.Controllers } diff --git a/pkg/apis/core/v1alpha1/types_federatedtypeconfig.go b/pkg/apis/core/v1alpha1/types_federatedtypeconfig.go index 641622f4..39b87d80 100644 --- a/pkg/apis/core/v1alpha1/types_federatedtypeconfig.go +++ b/pkg/apis/core/v1alpha1/types_federatedtypeconfig.go @@ -56,12 +56,6 @@ type FederatedTypeConfigSpec struct { // Configuration for StatusAggregation. If left empty, the StatusAggregation feature will be disabled. // +optional StatusAggregation *StatusAggregationConfig `json:"statusAggregation,omitempty"` - // Configuration for RevisionHistory. If left empty, the RevisionHistory feature will be disabled. - // +optional - RevisionHistory *RevisionHistoryConfig `json:"revisionHistory,omitempty"` - // Configuration for RolloutPlan. If left empty, the RolloutPlan feature will be disabled. - // +optional - RolloutPlan *RolloutPlanConfig `json:"rolloutPlan,omitempty"` // Configuration for StatusCollection. If left empty, the StatusCollection feature will be disabled. // +optional StatusCollection *StatusCollectionConfig `json:"statusCollection,omitempty"` @@ -124,18 +118,6 @@ type StatusAggregationConfig struct { Enabled bool `json:"enabled"` } -// RevisionHistoryConfig defines the configurations for the RevisionHistory feature. -type RevisionHistoryConfig struct { - // Whether or not preserve a RevisionHistory for the federated object during updates. - Enabled bool `json:"enabled"` -} - -// RolloutPlanConfig defines the configurations for the RolloutPlan feature. -type RolloutPlanConfig struct { - // Whether or not to synchronize the rollout process across clusters. - Enabled bool `json:"enabled"` -} - // AutoMigrationConfig defines the configurations for the AutoMigration feature. type AutoMigrationConfig struct { // Whether or not to automatically migrate unschedulable pods to a different cluster. 
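For reference, the feature blocks that remain in FederatedTypeConfigSpec (StatusAggregation, StatusCollection, AutoMigration) all follow the convention the removed getters relied on: a nil config block disables the feature, and accessors guard the pointer before reading Enabled. Below is a minimal self-contained sketch of that convention — the statusAggregationEnabled helper is invented here for illustration, while the two types mirror the spec above:

package main

import "fmt"

// StatusAggregationConfig mirrors the config block kept in the spec above;
// the removed RevisionHistoryConfig and RolloutPlanConfig had the same shape.
type StatusAggregationConfig struct {
	Enabled bool `json:"enabled"`
}

// FederatedTypeConfigSpec is reduced to the one field this sketch needs:
// an absent (nil) config block means the feature is disabled.
type FederatedTypeConfigSpec struct {
	StatusAggregation *StatusAggregationConfig `json:"statusAggregation,omitempty"`
}

// statusAggregationEnabled is a hypothetical helper following the same
// nil-guarded pattern as the GetRevisionHistoryEnabled and
// GetRolloutPlanEnabled getters deleted above.
func statusAggregationEnabled(spec FederatedTypeConfigSpec) bool {
	return spec.StatusAggregation != nil && spec.StatusAggregation.Enabled
}

func main() {
	fmt.Println(statusAggregationEnabled(FederatedTypeConfigSpec{})) // false: feature off when block absent
	enabled := FederatedTypeConfigSpec{StatusAggregation: &StatusAggregationConfig{Enabled: true}}
	fmt.Println(statusAggregationEnabled(enabled)) // true
}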
diff --git a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go index 5457a18e..d14044b6 100644 --- a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -846,16 +846,6 @@ func (in *FederatedTypeConfigSpec) DeepCopyInto(out *FederatedTypeConfigSpec) { *out = new(StatusAggregationConfig) **out = **in } - if in.RevisionHistory != nil { - in, out := &in.RevisionHistory, &out.RevisionHistory - *out = new(RevisionHistoryConfig) - **out = **in - } - if in.RolloutPlan != nil { - in, out := &in.RolloutPlan, &out.RolloutPlan - *out = new(RolloutPlanConfig) - **out = **in - } if in.StatusCollection != nil { in, out := &in.StatusCollection, &out.StatusCollection *out = new(StatusCollectionConfig) @@ -1705,38 +1695,6 @@ func (in *Resources) DeepCopy() *Resources { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RevisionHistoryConfig) DeepCopyInto(out *RevisionHistoryConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevisionHistoryConfig. -func (in *RevisionHistoryConfig) DeepCopy() *RevisionHistoryConfig { - if in == nil { - return nil - } - out := new(RevisionHistoryConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RolloutPlanConfig) DeepCopyInto(out *RolloutPlanConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutPlanConfig. -func (in *RolloutPlanConfig) DeepCopy() *RolloutPlanConfig { - if in == nil { - return nil - } - out := new(RolloutPlanConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SchedulerPluginWebhookConfiguration) DeepCopyInto(out *SchedulerPluginWebhookConfiguration) { *out = *in From 3e68e33bd1fa117e9232af2db9c2863a2b675f6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Sat, 22 Jul 2023 07:09:11 +0000 Subject: [PATCH 136/173] fix(sync): enqueue all fo and cfo on ftc change --- pkg/controllers/sync/controller.go | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 5a5da21e..e8607332 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -117,6 +117,7 @@ type SyncController struct { clusterAvailableDelay time.Duration clusterUnavailableDelay time.Duration reconcileOnClusterChangeDelay time.Duration + reconcileOnFTCChangeDelay time.Duration memberObjectEnqueueDelay time.Duration recheckAfterDispatchDelay time.Duration ensureDeletionRecheckDelay time.Duration @@ -154,6 +155,7 @@ func NewSyncController( clusterAvailableDelay: clusterAvailableDelay, clusterUnavailableDelay: clusterUnavailableDelay, reconcileOnClusterChangeDelay: time.Second * 3, + reconcileOnFTCChangeDelay: time.Second * 3, memberObjectEnqueueDelay: time.Second * 10, recheckAfterDispatchDelay: time.Second * 10, ensureDeletionRecheckDelay: time.Second * 5, @@ -184,6 +186,16 @@ func NewSyncController( // Build queue for triggering cluster reconciliations. 
s.clusterReadinessTransitionQueue = workqueue.NewNamedDelayingQueue("sync-controller-cluster-readiness-transition-queue") + if err := s.ftcManager.AddFTCUpdateHandler(func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) { + isNewFTC := lastObserved == nil && latest != nil + ftcPathDefinitionsChanged := lastObserved != nil && latest != nil && lastObserved.Spec.PathDefinition != latest.Spec.PathDefinition + if isNewFTC || ftcPathDefinitionsChanged { + s.enqueueForGVK(latest.GetSourceTypeGVK()) + } + }); err != nil { + return nil, fmt.Errorf("failed to add FTC update handler: %w", err) + } + if err := s.fedInformerManager.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{ Predicate: informermanager.RegisterOncePredicate, Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { @@ -317,7 +329,7 @@ func (s *SyncController) getClusterClient(clusterName string) (dynamic.Interface return nil, fmt.Errorf("client does not exist for cluster") } -// The function triggers reconciliation of all target federated resources. +// Triggers reconciliation of all target federated resources. func (s *SyncController) enqueueAllObjects() { s.logger.V(2).Info("Enqueuing all federated resources") s.fedAccessor.VisitFederatedResources(func(obj fedcorev1a1.GenericFederatedObject) { @@ -326,6 +338,22 @@ func (s *SyncController) enqueueAllObjects() { }) } +// Triggers reconciliation of all target federated resources of the given gvk. +func (s *SyncController) enqueueForGVK(gvk schema.GroupVersionKind) { + s.logger.V(2).Info("Enqueuing federated resources for gvk", "gvk", gvk.String()) + s.fedAccessor.VisitFederatedResources(func(obj fedcorev1a1.GenericFederatedObject) { + templateMeta, err := obj.GetSpec().GetTemplateMetadata() + if err != nil { + s.logger.Error(err, "failed to get template metadata") + return + } + if templateMeta.GroupVersionKind() == gvk { + qualifiedName := common.NewQualifiedName(obj) + s.worker.EnqueueWithDelay(qualifiedName, s.reconcileOnFTCChangeDelay) + } + }) +} + func (s *SyncController) reconcile(ctx context.Context, federatedName common.QualifiedName) (status worker.Result) { ctx, keyedLogger := logging.InjectLogger(ctx, s.logger.WithValues("federated-name", federatedName.String())) From d21907fff379920d07cb702d37231faf79cfde38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Sat, 22 Jul 2023 09:49:11 +0000 Subject: [PATCH 137/173] fix(queue): add missing Done after Get --- pkg/controllers/status/controller.go | 3 ++- pkg/controllers/statusaggregator/controller.go | 3 ++- pkg/controllers/sync/controller.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index ed94f7c2..da341185 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -241,11 +241,12 @@ func NewStatusController( func (s *StatusController) Run(ctx context.Context) { go func() { for { - _, shutdown := s.clusterQueue.Get() + item, shutdown := s.clusterQueue.Get() if shutdown { break } s.reconcileOnClusterChange() + s.clusterQueue.Done(item) } }() diff --git a/pkg/controllers/statusaggregator/controller.go b/pkg/controllers/statusaggregator/controller.go index 46c96dd4..51fb032d 100644 --- a/pkg/controllers/statusaggregator/controller.go +++ b/pkg/controllers/statusaggregator/controller.go @@ -208,11 +208,12 @@ func (a *StatusAggregator) Run(stopChan <-chan struct{}) { a.informer.Start() go func() { 
for {
- _, shutdown := a.clusterQueue.Get()
+ item, shutdown := a.clusterQueue.Get()
if shutdown {
break
}
a.reconcileOnClusterChange()
+ a.clusterQueue.Done(item)
}
}()
if !cache.WaitForNamedCacheSync(a.name, stopChan, a.HasSynced) {
diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go
index e8607332..88d50222 100644
--- a/pkg/controllers/sync/controller.go
+++ b/pkg/controllers/sync/controller.go
@@ -273,11 +273,12 @@ func (s *SyncController) Run(ctx context.Context) {
s.fedAccessor.Run(ctx)
go func() {
for {
- _, shutdown := s.clusterReadinessTransitionQueue.Get()
+ item, shutdown := s.clusterReadinessTransitionQueue.Get()
if shutdown {
break
}
s.enqueueAllObjects()
+ s.clusterReadinessTransitionQueue.Done(item)
}
}()
From ae7c11b1ae75246a5cf363b144245d8529edcc37 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?=
Date: Sat, 22 Jul 2023 10:13:55 +0000
Subject: [PATCH 138/173] fix(informermanager): handle DeletedFinalStateUnknown
---
 pkg/util/informermanager/federatedinformermanager.go | 6 ++++++
 1 file changed, 6 insertions(+)
diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go
index af046f0d..9a5fd79c 100644
--- a/pkg/util/informermanager/federatedinformermanager.go
+++ b/pkg/util/informermanager/federatedinformermanager.go
@@ -478,6 +478,12 @@ func (m *federatedInformerManager) Start(ctx context.Context) {
}
},
DeleteFunc: func(obj interface{}) {
+ if deleted, ok := obj.(*cache.DeletedFinalStateUnknown); ok {
+ obj = deleted.Obj
+ if obj == nil {
+ return
+ }
+ }
cluster := obj.(*fedcorev1a1.FederatedCluster)
if predicate(cluster, nil) {
callback(cluster)
From 396f752523b8281d09d02d4793c780861a532d3e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?=
Date: Sat, 22 Jul 2023 08:02:09 +0000
Subject: [PATCH 139/173] fix(sync): trigger cascading deletion when cluster marked for deletion
---
 pkg/controllers/sync/controller.go | 49 +++++++++++++++++++++---------
 1 file changed, 34 insertions(+), 15 deletions(-)
diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go
index 88d50222..7e9665be 100644
--- a/pkg/controllers/sync/controller.go
+++ b/pkg/controllers/sync/controller.go
@@ -99,9 +99,8 @@ type SyncController struct {
// For handling cascading deletion.
clusterCascadingDeletionWorker worker.ReconcileWorker[common.QualifiedName]
- // For triggering reconciliation of all target resources. This is
- // used when a new cluster becomes available.
- clusterReadinessTransitionQueue workqueue.DelayingInterface
+ // For triggering reconciliation of all target resources.
+ reconcileAllResourcesQueue workqueue.DelayingInterface
fedClient fedclient.Interface
@@ -183,8 +182,8 @@ func NewSyncController(
metrics,
)
- // Build queue for triggering cluster reconciliations.
- s.clusterReadinessTransitionQueue = workqueue.NewNamedDelayingQueue("sync-controller-cluster-readiness-transition-queue")
+ // Build queue for triggering reconciliation of all federated resources.
+ s.reconcileAllResourcesQueue = workqueue.NewNamedDelayingQueue(SyncControllerName + "-reconcile-all-resources-queue") if err := s.ftcManager.AddFTCUpdateHandler(func(lastObserved, latest *fedcorev1a1.FederatedTypeConfig) { isNewFTC := lastObserved == nil && latest != nil @@ -222,7 +221,7 @@ func NewSyncController( &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { // Enqueue cluster when it's added or marked for deletion to ensure cascading deletion - return oldCluster == nil || newCluster != nil && !newCluster.GetDeletionTimestamp().IsZero() + return oldCluster == nil || newCluster != nil && oldCluster.GetDeletionTimestamp().IsZero() && !newCluster.GetDeletionTimestamp().IsZero() }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { s.clusterCascadingDeletionWorker.Enqueue(common.NewQualifiedName(cluster)) @@ -236,18 +235,34 @@ func NewSyncController( return newClusterIsReady && oldClusterIsUnready }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { - s.clusterReadinessTransitionQueue.AddAfter(struct{}{}, s.clusterAvailableDelay) + s.reconcileAllResourcesQueue.AddAfter(struct{}{}, s.clusterAvailableDelay) }, }, &informermanager.ClusterEventHandler{ Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { // Reconcile all federated objects when cluster becomes unready - oldClusterIsReady := oldCluster != nil && clusterutil.IsClusterReady(&oldCluster.Status) - newClusterIsUnready := newCluster == nil || !clusterutil.IsClusterReady(&newCluster.Status) - return oldClusterIsReady && newClusterIsUnready + + if newCluster == nil { + // When the cluster is deleted + return true + } + if clusterutil.IsClusterReady(&newCluster.Status) { + return false + } + return oldCluster != nil && clusterutil.IsClusterReady(&oldCluster.Status) }, Callback: func(cluster *fedcorev1a1.FederatedCluster) { - s.clusterReadinessTransitionQueue.AddAfter(struct{}{}, s.clusterUnavailableDelay) + s.reconcileAllResourcesQueue.AddAfter(struct{}{}, s.clusterUnavailableDelay) + }, + }, + &informermanager.ClusterEventHandler{ + Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { + // Trigger cascading deletion when cluster is marked for deletion + return newCluster != nil && !newCluster.GetDeletionTimestamp().IsZero() && + (oldCluster == nil || oldCluster.GetDeletionTimestamp().IsZero()) + }, + Callback: func(cluster *fedcorev1a1.FederatedCluster) { + s.reconcileAllResourcesQueue.Add(struct{}{}) }, }, ); err != nil { @@ -273,12 +288,12 @@ func (s *SyncController) Run(ctx context.Context) { s.fedAccessor.Run(ctx) go func() { for { - item, shutdown := s.clusterReadinessTransitionQueue.Get() + item, shutdown := s.reconcileAllResourcesQueue.Get() if shutdown { break } s.enqueueAllObjects() - s.clusterReadinessTransitionQueue.Done(item) + s.reconcileAllResourcesQueue.Done(item) } }() @@ -295,7 +310,7 @@ func (s *SyncController) Run(ctx context.Context) { // Ensure all goroutines are cleaned up when the stop channel closes go func() { <-ctx.Done() - s.clusterReadinessTransitionQueue.ShutDown() + s.reconcileAllResourcesQueue.ShutDown() }() } @@ -909,10 +924,14 @@ func (s *SyncController) removeClusterFinalizer(ctx context.Context, cluster *fe return nil } -func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Context, qualifiedName common.QualifiedName) worker.Result { +func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Context, qualifiedName common.QualifiedName) 
(status worker.Result) { logger := s.logger.WithValues("cluster-name", qualifiedName.String(), "process", "cluster-cascading-deletion") ctx = klog.NewContext(ctx, logger) + start := time.Now() logger.V(3).Info("Starting to reconcile cluster for cascading deletion") + defer func() { + logger.V(3).Info("Finished reconciling cluster for cascading deletion", "duration", time.Since(start), "status", status.String()) + }() clusterLister := s.fedInformerManager.GetFederatedClusterLister() cluster, err := clusterLister.Get(qualifiedName.Name) From f6eed95d2c8f3ffc67c26fa318c1656ef558844c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 03:49:41 +0000 Subject: [PATCH 140/173] chore(sync): use memberObjectEnqueueDelay --- cmd/controller-manager/app/core.go | 1 + pkg/controllers/sync/controller.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index 9be76235..a105add5 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -224,6 +224,7 @@ func startSyncController( controllerCtx.TargetNamespace, controllerCtx.ClusterAvailableDelay, controllerCtx.ClusterUnavailableDelay, + controllerCtx.ComponentConfig.MemberObjectEnqueueDelay, klog.Background(), controllerCtx.WorkerCount, controllerCtx.Metrics, diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 7e9665be..21a302ee 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -139,7 +139,7 @@ func NewSyncController( fedInformerManager informermanager.FederatedInformerManager, fedSystemNamespace, targetNamespace string, - clusterAvailableDelay, clusterUnavailableDelay time.Duration, + clusterAvailableDelay, clusterUnavailableDelay, memberObjectEnqueueDelay time.Duration, logger klog.Logger, workerCount int, @@ -155,7 +155,7 @@ func NewSyncController( clusterUnavailableDelay: clusterUnavailableDelay, reconcileOnClusterChangeDelay: time.Second * 3, reconcileOnFTCChangeDelay: time.Second * 3, - memberObjectEnqueueDelay: time.Second * 10, + memberObjectEnqueueDelay: memberObjectEnqueueDelay, recheckAfterDispatchDelay: time.Second * 10, ensureDeletionRecheckDelay: time.Second * 5, cascadingDeletionRecheckDelay: time.Second * 10, From e0e36de5d7f990e8edf07be128efe81d7fba3391 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 03:51:19 +0000 Subject: [PATCH 141/173] fix: make override controller compilable --- .../override/overridepolicy_controller.go | 24 +-- pkg/controllers/override/util.go | 8 +- pkg/controllers/override/util_test.go | 161 ++++++++++-------- 3 files changed, 106 insertions(+), 87 deletions(-) diff --git a/pkg/controllers/override/overridepolicy_controller.go b/pkg/controllers/override/overridepolicy_controller.go index 4c744196..8d6388b0 100644 --- a/pkg/controllers/override/overridepolicy_controller.go +++ b/pkg/controllers/override/overridepolicy_controller.go @@ -38,12 +38,13 @@ import ( fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions" fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" 
"github.com/kubewharf/kubeadmiral/pkg/util/eventsink" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/meta" "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) @@ -105,28 +106,28 @@ func NewOverridePolicyController( metrics, ) - if _, err := c.fedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + if _, err := c.fedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { fedObj := o.(*fedcorev1a1.FederatedObject) c.worker.Enqueue(common.QualifiedName{Namespace: fedObj.Namespace, Name: fedObj.Name}) })); err != nil { return nil, err } - if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { fedObj := o.(*fedcorev1a1.ClusterFederatedObject) c.worker.Enqueue(common.QualifiedName{Name: fedObj.Name}) })); err != nil { return nil, err } - if _, err := c.overridePolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + if _, err := c.overridePolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { policy := o.(fedcorev1a1.GenericOverridePolicy) c.enqueueFedObjectsUsingPolicy(policy, OverridePolicyNameLabel) })); err != nil { return nil, err } - if _, err := c.clusterOverridePolicyInformer.Informer().AddEventHandler(util.NewTriggerOnAllChanges(func(o pkgruntime.Object) { + if _, err := c.clusterOverridePolicyInformer.Informer().AddEventHandler(eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { policy := o.(fedcorev1a1.GenericOverridePolicy) c.enqueueFedObjectsUsingPolicy(policy, OverridePolicyNameLabel) })); err != nil { @@ -190,12 +191,12 @@ func (c *Controller) enqueueFederatedObjectsForFTC(ftc *fedcorev1a1.FederatedTyp } for _, obj := range allObjects { - sourceGVK, err := obj.GetSpec().GetTemplateGVK() + sourceMetadata, err := obj.GetSpec().GetTemplateMetadata() if err != nil { - c.logger.Error(err, "Failed to get source GVK from FederatedObject, will not enqueue") + c.logger.Error(err, "Failed to get source metadata from FederatedObject, will not enqueue") continue } - if sourceGVK == ftc.GetSourceTypeGVK() { + if sourceMetadata.GroupVersionKind() == ftc.GetSourceTypeGVK() { c.worker.Enqueue(common.NewQualifiedName(obj)) } } @@ -289,11 +290,12 @@ func (c *Controller) reconcile(ctx context.Context, qualifiedName common.Qualifi return worker.StatusAllOK } - templateGVK, err := fedObject.GetSpec().GetTemplateGVK() + templateMetadata, err := fedObject.GetSpec().GetTemplateMetadata() if err != nil { - keyedLogger.Error(err, "Failed to get template gvk") + keyedLogger.Error(err, "Failed to get template metadata") return worker.StatusError } + templateGVK := templateMetadata.GroupVersionKind() ctx, keyedLogger = logging.InjectLoggerValues(ctx, "source-gvk", templateGVK.String()) typeConfig, exist := c.informerManager.GetResourceFTC(templateGVK) @@ -348,7 +350,7 @@ func (c *Controller) reconcile(ctx context.Context, qualifiedName common.Qualifi corev1.EventTypeWarning, EventReasonParseOverridePolicyFailed, "failed to parse overrides from %s %q: %v", - 
util.GetResourceKind(policy), + meta.GetResourceKind(policy), policy.GetKey(), err.Error(), ) diff --git a/pkg/controllers/override/util.go b/pkg/controllers/override/util.go index 519d41f7..485ed9cc 100644 --- a/pkg/controllers/override/util.go +++ b/pkg/controllers/override/util.go @@ -45,8 +45,8 @@ Returns the policy if found, whether a recheck is needed on error, and encounter func lookForMatchedPolicies( obj fedcorev1a1.GenericFederatedObject, isNamespaced bool, - overridePolicyStore fedcorev1a1listers.OverridePolicyLister, - clusterOverridePolicyStore fedcorev1a1listers.ClusterOverridePolicyLister, + overridePolicyLister fedcorev1a1listers.OverridePolicyLister, + clusterOverridePolicyLister fedcorev1a1listers.ClusterOverridePolicyLister, ) ([]fedcorev1a1.GenericOverridePolicy, bool, error) { policies := make([]fedcorev1a1.GenericOverridePolicy, 0) @@ -58,7 +58,7 @@ func lookForMatchedPolicies( return nil, false, fmt.Errorf("policy name cannot be empty") } - matchedPolicy, err := clusterOverridePolicyStore.Get(clusterPolicyName) + matchedPolicy, err := clusterOverridePolicyLister.Get(clusterPolicyName) if err != nil && !errors.IsNotFound(err) { return nil, true, err } @@ -75,7 +75,7 @@ func lookForMatchedPolicies( return nil, false, fmt.Errorf("policy name cannot be empty") } - matchedPolicy, err := overridePolicyStore.OverridePolicies(obj.GetNamespace()).Get(policyName) + matchedPolicy, err := overridePolicyLister.OverridePolicies(obj.GetNamespace()).Get(policyName) if err != nil && !errors.IsNotFound(err) { return nil, true, err } diff --git a/pkg/controllers/override/util_test.go b/pkg/controllers/override/util_test.go index d2d6f4d6..24c9919b 100644 --- a/pkg/controllers/override/util_test.go +++ b/pkg/controllers/override/util_test.go @@ -21,13 +21,14 @@ import ( "testing" "github.com/stretchr/testify/assert" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + jsonutil "k8s.io/apimachinery/pkg/util/json" "k8s.io/client-go/tools/cache" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" + fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" ) func TestLookForMatchedPolicies(t *testing.T) { @@ -263,42 +264,48 @@ func TestLookForMatchedPolicies(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - obj := &unstructured.Unstructured{Object: make(map[string]interface{})} + var obj fedcorev1a1.GenericFederatedObject isNamespaced := testCase.obj.Namespace != "" if isNamespaced { - err := unstructured.SetNestedField(obj.Object, testCase.obj.Namespace, "metadata", "namespace") - if err != nil { - panic(err) + obj = &fedcorev1a1.FederatedObject{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCase.obj.Namespace, + }, + } + } else { + obj = &fedcorev1a1.ClusterFederatedObject{ + ObjectMeta: metav1.ObjectMeta{}, } } - err := unstructured.SetNestedStringMap(obj.Object, testCase.obj.Labels, "metadata", "labels") - if err != nil { - panic(err) - } + obj.SetLabels(testCase.obj.Labels) - overridePolicyStore := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + overridePolicyIndexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + 
overridePolicyLister := fedcorev1a1listers.NewOverridePolicyLister(overridePolicyIndexer) for _, opMeta := range testCase.overridePolicies { op := &fedcorev1a1.OverridePolicy{ ObjectMeta: opMeta, } - err := overridePolicyStore.Add(op) + err := overridePolicyIndexer.Add(op) if err != nil { panic(err) } } - clusterOverridePolicyStore := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + clusterOverridePolicyIndexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{}) + clusterOverridePolicyStore := fedcorev1a1listers.NewClusterOverridePolicyLister(clusterOverridePolicyIndexer) for _, copMeta := range testCase.clusterOverridePolicies { cop := &fedcorev1a1.ClusterOverridePolicy{ ObjectMeta: copMeta, } - err := clusterOverridePolicyStore.Add(cop) + err := clusterOverridePolicyIndexer.Add(cop) if err != nil { panic(err) } } - foundPolicies, needsRecheckOnError, err := lookForMatchedPolicies(obj, isNamespaced, overridePolicyStore, clusterOverridePolicyStore) + foundPolicies, needsRecheckOnError, err := lookForMatchedPolicies(obj, isNamespaced, overridePolicyLister, clusterOverridePolicyStore) if (err != nil) != testCase.isErrorExpected { t.Fatalf("err = %v, but isErrorExpected = %v", err, testCase.isErrorExpected) } @@ -325,7 +332,7 @@ func TestParseOverrides(t *testing.T) { testCases := map[string]struct { policy fedcorev1a1.GenericOverridePolicy clusters []*fedcorev1a1.FederatedCluster - expectedOverridesMap util.OverridesMap + expectedOverridesMap overridesMap isErrorExpected bool }{ "no clusters - should return no overrides": { @@ -341,7 +348,7 @@ func TestParseOverrides(t *testing.T) { }, }, clusters: nil, - expectedOverridesMap: make(util.OverridesMap), + expectedOverridesMap: make(overridesMap), isErrorExpected: false, }, "invalid clusterSelector - should return error": { @@ -381,7 +388,7 @@ func TestParseOverrides(t *testing.T) { }, }, }, - expectedOverridesMap: make(util.OverridesMap), + expectedOverridesMap: make(overridesMap), isErrorExpected: false, }, "single cluster multiple OverrideRules - should return overrides from matched rules in order": { @@ -462,17 +469,17 @@ func TestParseOverrides(t *testing.T) { }, }, }, - expectedOverridesMap: util.OverridesMap{ - "cluster1": fedtypesv1a1.OverridePatches{ + expectedOverridesMap: overridesMap{ + "cluster1": fedcorev1a1.OverridePatches{ { Op: "add", Path: "/a/b", - Value: float64(1), + Value: asJSON(float64(1)), }, { Op: "replace", Path: "/aa/bb", - Value: []interface{}{"banana", "mango"}, + Value: asJSON([]interface{}{"banana", "mango"}), }, { Op: "remove", @@ -481,7 +488,7 @@ func TestParseOverrides(t *testing.T) { { Op: "add", Path: "/ee/ff", - Value: "some string", + Value: asJSON("some string"), }, }, }, @@ -577,41 +584,41 @@ func TestParseOverrides(t *testing.T) { }, }, }, - expectedOverridesMap: util.OverridesMap{ - "cluster1": fedtypesv1a1.OverridePatches{ + expectedOverridesMap: overridesMap{ + "cluster1": fedcorev1a1.OverridePatches{ { Op: "add", Path: "/a/b", - Value: float64(1), + Value: asJSON(float64(1)), }, { Op: "replace", Path: "/aa/bb", - Value: []interface{}{"banana", "mango"}, + Value: asJSON([]interface{}{"banana", "mango"}), }, { Op: "replace", Path: "/c/d", - Value: float64(1), + Value: asJSON(float64(1)), }, { Op: "replace", Path: "/cc/dd", - Value: map[string]interface{}{ + Value: asJSON(map[string]interface{}{ "key": "value", - }, + }), }, }, - "cluster2": fedtypesv1a1.OverridePatches{ + "cluster2": fedcorev1a1.OverridePatches{ { Op: "add", Path: "/a/b", - Value: float64(1), 
+ Value: asJSON(float64(1)), }, { Op: "replace", Path: "/aa/bb", - Value: []interface{}{"banana", "mango"}, + Value: asJSON([]interface{}{"banana", "mango"}), }, { Op: "remove", @@ -620,7 +627,7 @@ func TestParseOverrides(t *testing.T) { { Op: "add", Path: "/ee/ff", - Value: "some string", + Value: asJSON("some string"), }, }, }, @@ -1002,123 +1009,123 @@ func TestIsClusterMatched(t *testing.T) { func TestMergeOverrides(t *testing.T) { testCases := map[string]struct { - dst util.OverridesMap - src util.OverridesMap - expectedResult util.OverridesMap + dst overridesMap + src overridesMap + expectedResult overridesMap }{ "nil dst - result should be equivalent to src": { dst: nil, - src: util.OverridesMap{ - "cluster1": []fedtypesv1a1.OverridePatch{ + src: overridesMap{ + "cluster1": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 1, + Value: asJSON(1), }, }, - "cluster2": []fedtypesv1a1.OverridePatch{ + "cluster2": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 2, + Value: asJSON(2), }, }, }, - expectedResult: util.OverridesMap{ - "cluster1": []fedtypesv1a1.OverridePatch{ + expectedResult: overridesMap{ + "cluster1": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 1, + Value: asJSON(1), }, }, - "cluster2": []fedtypesv1a1.OverridePatch{ + "cluster2": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 2, + Value: asJSON(2), }, }, }, }, "non-nil dst - override patches for the same cluster should be appended from src": { - dst: util.OverridesMap{ - "cluster1": []fedtypesv1a1.OverridePatch{ + dst: overridesMap{ + "cluster1": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 1, + Value: asJSON(1), }, }, }, - src: util.OverridesMap{ - "cluster1": []fedtypesv1a1.OverridePatch{ + src: overridesMap{ + "cluster1": []fedcorev1a1.OverridePatch{ { Op: "add", Path: "/spec/replicas", - Value: 10, + Value: asJSON(10), }, }, }, - expectedResult: util.OverridesMap{ - "cluster1": []fedtypesv1a1.OverridePatch{ + expectedResult: overridesMap{ + "cluster1": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 1, + Value: asJSON(1), }, { Op: "add", Path: "/spec/replicas", - Value: 10, + Value: asJSON(10), }, }, }, }, "non-nil dst - existing overrides patches should be kept": { - dst: util.OverridesMap{ - "cluster1": []fedtypesv1a1.OverridePatch{ + dst: overridesMap{ + "cluster1": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 1, + Value: asJSON(1), }, }, - "cluster2": []fedtypesv1a1.OverridePatch{ + "cluster2": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 2, + Value: asJSON(2), }, }, }, - src: util.OverridesMap{ - "cluster1": []fedtypesv1a1.OverridePatch{ + src: overridesMap{ + "cluster1": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 3, + Value: asJSON(3), }, }, }, - expectedResult: util.OverridesMap{ - "cluster1": []fedtypesv1a1.OverridePatch{ + expectedResult: overridesMap{ + "cluster1": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 1, + Value: asJSON(1), }, { Op: "replace", Path: "/spec/replicas", - Value: 3, + Value: asJSON(3), }, }, - "cluster2": []fedtypesv1a1.OverridePatch{ + "cluster2": []fedcorev1a1.OverridePatch{ { Op: "replace", Path: "/spec/replicas", - Value: 2, + Value: asJSON(2), }, }, }, @@ -1132,3 +1139,13 @@ func TestMergeOverrides(t *testing.T) { }) } } + +func asJSON(value any) 
apiextensionsv1.JSON { + var ret apiextensionsv1.JSON + if data, err := jsonutil.Marshal(value); err != nil { + panic(err) + } else { + ret.Raw = data + } + return ret +} From 4af71c673f38b1b7bff1a29f572f672bfab29394 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 04:25:05 +0000 Subject: [PATCH 142/173] fix(nsautoprop): make it compilable --- pkg/controllers/nsautoprop/controller.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/nsautoprop/controller.go b/pkg/controllers/nsautoprop/controller.go index 8744e23a..c129a376 100644 --- a/pkg/controllers/nsautoprop/controller.go +++ b/pkg/controllers/nsautoprop/controller.go @@ -41,12 +41,14 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/adoption" annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" "github.com/kubewharf/kubeadmiral/pkg/util/logging" "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/orphaning" "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) @@ -277,8 +279,8 @@ func (c *Controller) reconcile(ctx context.Context, qualifiedName common.Qualifi // Ensure we adopt pre-existing namespaces in member clusters isDirty, err = c.ensureAnnotation( fedNamespace, - util.ConflictResolutionInternalAnnotation, - string(util.ConflictResolutionAdopt), + adoption.ConflictResolutionInternalAnnotation, + string(adoption.ConflictResolutionAdopt), ) if err != nil { utilruntime.HandleError(err) @@ -289,8 +291,8 @@ func (c *Controller) reconcile(ctx context.Context, qualifiedName common.Qualifi // Ensure we don't delete adopted member namespaces when the federated namespace is deleted isDirty, err = c.ensureAnnotation( fedNamespace, - util.OrphanManagedResourcesInternalAnnotation, - string(util.OrphanManagedResourcesAdopted), + orphaning.OrphanManagedResourcesInternalAnnotation, + string(orphaning.OrphanManagedResourcesAdopted), ) if err != nil { utilruntime.HandleError(err) From 687789d58d1346d1f3f290167d4ac7c2a57b6cb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 04:45:13 +0000 Subject: [PATCH 143/173] fix(status): make it compilable --- pkg/controllers/status/controller.go | 100 +++--------------------- pkg/controllers/util/federatedstatus.go | 29 ------- 2 files changed, 10 insertions(+), 119 deletions(-) diff --git a/pkg/controllers/status/controller.go b/pkg/controllers/status/controller.go index da341185..350b45da 100644 --- a/pkg/controllers/status/controller.go +++ b/pkg/controllers/status/controller.go @@ -26,7 +26,6 @@ import ( "fmt" "reflect" "sort" - "strconv" "strings" "time" @@ -412,7 +411,7 @@ func (s *StatusController) reconcile( if existingStatus != nil { hasRSDigestsAnnotation, err = annotation.HasAnnotationKeyValue( existingStatus, - util.LatestReplicasetDigestsAnnotation, + common.LatestReplicasetDigestsAnnotation, rsDigestsAnnotation, ) if err != nil { @@ -423,18 +422,16 @@ func (s *StatusController) reconcile( collectedStatus := newCollectedStatusObject(fedObject, clusterStatuses) if 
rsDigestsAnnotation != "" { - collectedStatus.SetAnnotations(map[string]string{util.LatestReplicasetDigestsAnnotation: rsDigestsAnnotation}) + collectedStatus.SetAnnotations(map[string]string{common.LatestReplicasetDigestsAnnotation: rsDigestsAnnotation}) } - replicasAnnotationUpdated := false - if targetIsDeployment { - replicasAnnotationUpdated, err = s.setReplicasAnnotations( + + if existingStatus == nil { + collectedStatus.GetLastUpdateTime().Time = time.Now() + _, err = collectedstatusadapters.Create( ctx, + s.fedClient.CoreV1alpha1(), collectedStatus, - fedObject, - clusterNames, - templateQualifiedName, - templateGVK, - typeConfig, + metav1.CreateOptions{}, ) if err != nil { keyedLogger.Error(err, "Failed to set annotations about replicas") @@ -458,7 +455,6 @@ func (s *StatusController) reconcile( } } else if !reflect.DeepEqual(existingStatus.GetGenericCollectedStatus().Clusters, collectedStatus.GetGenericCollectedStatus().Clusters) || !reflect.DeepEqual(collectedStatus.GetLabels(), existingStatus.GetLabels()) || - replicasAnnotationUpdated || (rsDigestsAnnotation != "" && !hasRSDigestsAnnotation) { collectedStatus.GetLastUpdateTime().Time = time.Now() existingStatus.GetGenericCollectedStatus().Clusters = collectedStatus.GetGenericCollectedStatus().Clusters @@ -507,11 +503,12 @@ func (s *StatusController) enqueueEnableCollectedStatusObject(qualifiedName comm return } - templateGVK, err := fedObject.GetSpec().GetTemplateGVK() + templateMetadata, err := fedObject.GetSpec().GetTemplateMetadata() if err != nil { keyedLogger.Error(err, "Failed to get template gvk") return } + templateGVK := templateMetadata.GroupVersionKind() typeConfig, exists := s.ftcManager.GetResourceFTC(templateGVK) if !exists || typeConfig == nil { @@ -692,83 +689,6 @@ func (s *StatusController) latestReplicasetDigests( return digests, nil } -func (s *StatusController) realUpdatedReplicas( - ctx context.Context, - clusterNames []string, - targetQualifiedName common.QualifiedName, - targetGVK schema.GroupVersionKind, - typeConfig *fedcorev1a1.FederatedTypeConfig, - revision string, -) (string, error) { - key := targetQualifiedName.String() - var updatedReplicas int64 - targetKind := typeConfig.Spec.SourceType.Kind - keyedLogger := klog.FromContext(ctx) - - for _, clusterName := range clusterNames { - clusterObj, exist, err := informermanager.GetClusterObject( - ctx, - s.ftcManager, - s.fedInformerManager, - clusterName, - targetQualifiedName, - targetGVK, - ) - if err != nil { - return "", errors.Wrapf(err, "Failed to get %s %q from cluster %q", targetKind, key, clusterName) - } - if !exist { - continue - } - // ignore digest errors for now since we want to try the best to collect the status - digest, err := util.ReplicaSetDigestFromObject(clusterObj) - if err != nil { - keyedLogger.WithValues("cluster-name", clusterName).Error(err, "Failed to get latestreplicaset digest") - continue - } - keyedLogger.WithValues("cluster-name", clusterName, "replicas-digest", digest). - V(4). 
- Info("Got latestreplicaset digest") - if digest.CurrentRevision != revision { - continue - } - if digest.ObservedGeneration < digest.Generation { - continue - } - updatedReplicas += digest.UpdatedReplicas - } - return strconv.FormatInt(updatedReplicas, 10), nil -} - -func (s *StatusController) setReplicasAnnotations( - ctx context.Context, - collectedStatus fedcorev1a1.GenericCollectedStatusObject, - fedObject fedcorev1a1.GenericFederatedObject, - clusterNames []string, - qualifedName common.QualifiedName, - targetGVK schema.GroupVersionKind, - typeConfig *fedcorev1a1.FederatedTypeConfig, -) (bool, error) { - revision, ok := fedObject.GetAnnotations()[common.CurrentRevisionAnnotation] - if !ok { - return false, nil - } - updatedReplicas, err := s.realUpdatedReplicas(ctx, clusterNames, qualifedName, targetGVK, typeConfig, revision) - if err != nil { - return false, err - } - - collectedStatusAnno := collectedStatus.GetAnnotations() - if collectedStatusAnno == nil { - collectedStatusAnno = make(map[string]string) - } - collectedStatusAnno[util.AggregatedUpdatedReplicas] = updatedReplicas - collectedStatusAnno[common.CurrentRevisionAnnotation] = revision - - collectedStatus.SetAnnotations(collectedStatusAnno) - return true, nil -} - func newCollectedStatusObject( fedObj fedcorev1a1.GenericFederatedObject, clusterStatus []fedcorev1a1.CollectedFieldsWithCluster, diff --git a/pkg/controllers/util/federatedstatus.go b/pkg/controllers/util/federatedstatus.go index dff29dfc..899d06ec 100644 --- a/pkg/controllers/util/federatedstatus.go +++ b/pkg/controllers/util/federatedstatus.go @@ -39,11 +39,6 @@ const ( LatestReplicasetObservedGenerationAnnotation = "latestreplicaset.kubeadmiral.io/observed-generation" ) -const ( - // annotations for federatedDeploymentStatus - AggregatedUpdatedReplicas = common.DefaultPrefix + "aggregated-updated-replicas" -) - // FederatedResource is a generic representation of a federated type type FederatedResource struct { metav1.TypeMeta `json:",inline"` @@ -69,13 +64,6 @@ type LatestReplicasetDigest struct { SourceGeneration int64 `json:"sourceGeneration,omitempty"` } -type ReplicaSetDigest struct { - CurrentRevision string - UpdatedReplicas int64 - Generation int64 - ObservedGeneration int64 -} - func LatestReplicasetDigestFromObject(clusterName string, object *unstructured.Unstructured) (LatestReplicasetDigest, []error) { errs := []error{} @@ -128,20 +116,3 @@ func intEntry(m map[string]string, key string, errs *[]error) int64 { return 0 } } - -func ReplicaSetDigestFromObject(utd *unstructured.Unstructured) (*ReplicaSetDigest, error) { - observedGeneration, found, err := unstructured.NestedInt64(utd.Object, "status", "observedGeneration") - if err != nil || !found { - return nil, fmt.Errorf("failed to retrieve observedGeneration: %t, %v", found, err) - } - updatedReplicas, found, err := unstructured.NestedInt64(utd.Object, "status", "updatedReplicas") - if err != nil || !found { - return nil, fmt.Errorf("failed to retrieve updatedReplicas: %t, %v", found, err) - } - return &ReplicaSetDigest{ - CurrentRevision: utd.GetAnnotations()[common.CurrentRevisionAnnotation], - UpdatedReplicas: updatedReplicas, - Generation: utd.GetGeneration(), - ObservedGeneration: observedGeneration, - }, nil -} From cf147527007af0a56c770a8f8f1d167625641ac7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 04:55:03 +0000 Subject: [PATCH 144/173] fix(sync): revert deletion of source-generation annotation --- 
pkg/controllers/common/constants.go | 2 ++ pkg/controllers/federate/util.go | 3 ++- pkg/controllers/sync/resource.go | 7 +++++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index 68d38c5f..49404b97 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -84,6 +84,8 @@ const ( AnnotationValueTrue = "true" AnnotationValueFalse = "false" + SourceGenerationAnnotation = DefaultPrefix + "source-generation" + // The following annotations control the behavior of Kubeadmiral controllers. NoSchedulingAnnotation = DefaultPrefix + "no-scheduling" diff --git a/pkg/controllers/federate/util.go b/pkg/controllers/federate/util.go index cab26805..daf72fba 100644 --- a/pkg/controllers/federate/util.go +++ b/pkg/controllers/federate/util.go @@ -63,7 +63,6 @@ func templateForSourceObject( template.SetSelfLink("") template.SetUID("") template.SetResourceVersion("") - template.SetGeneration(0) template.SetCreationTimestamp(metav1.Time{}) template.SetDeletionTimestamp(nil) template.SetAnnotations(annotations) @@ -72,6 +71,8 @@ func templateForSourceObject( template.SetFinalizers(nil) template.SetManagedFields(nil) unstructured.RemoveNestedField(template.Object, common.StatusField) + // NOTE: we intentionally do not set generation to 0 so the SourceGeneration annotation + // can be populated in target objects. return template } diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 516a16b5..9f7077d9 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -39,6 +39,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/dispatch" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/version" + annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" overridesutil "github.com/kubewharf/kubeadmiral/pkg/util/overrides" @@ -158,6 +159,12 @@ func (r *federatedResource) ComputePlacement(clusters []*fedcorev1a1.FederatedCl func (r *federatedResource) ObjectForCluster(clusterName string) (*unstructured.Unstructured, error) { obj := r.template.DeepCopy() + if obj.GetGeneration() != 0 { + if _, err := annotationutil.AddAnnotation(obj, common.SourceGenerationAnnotation, fmt.Sprintf("%d", obj.GetGeneration())); err != nil { + return nil, err + } + } + switch r.TargetGVK() { case common.JobGVK: if err := dropJobFields(obj); err != nil { From 3fed3077e4c7711711da6356d88ccbbc87dcbb1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 05:00:33 +0000 Subject: [PATCH 145/173] chore(status): remove stale structs --- pkg/controllers/util/federatedstatus.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/pkg/controllers/util/federatedstatus.go b/pkg/controllers/util/federatedstatus.go index 899d06ec..5dabb9e4 100644 --- a/pkg/controllers/util/federatedstatus.go +++ b/pkg/controllers/util/federatedstatus.go @@ -24,7 +24,6 @@ import ( "fmt" "strconv" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" @@ -39,21 +38,6 @@ const ( LatestReplicasetObservedGenerationAnnotation = "latestreplicaset.kubeadmiral.io/observed-generation" ) -// FederatedResource is a generic 
representation of a federated type
-type FederatedResource struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- ClusterStatus []ResourceClusterStatus `json:"clusterStatus,omitempty"`
-}
-
-// ResourceClusterStatus defines the status of federated resource within a cluster
-type ResourceClusterStatus struct {
- ClusterName string `json:"clusterName,omitempty"`
- Error string `json:"error,omitempty"`
- CollectedFields map[string]interface{} `json:"collectedFields,omitempty"`
-}
-
type LatestReplicasetDigest struct {
ClusterName string `json:"clusterName,omitempty"`
ReplicasetName string `json:"replicasetName,omitempty"`
From 4ecd638934c69d3cb8cbd172738822b96e5bdc9c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?=
Date: Thu, 27 Jul 2023 05:02:15 +0000
Subject: [PATCH 146/173] chore: fix wrong import
---
 pkg/util/annotation/submap_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/util/annotation/submap_test.go b/pkg/util/annotation/submap_test.go
index ac4e93d4..7136f7c1 100644
--- a/pkg/util/annotation/submap_test.go
+++ b/pkg/util/annotation/submap_test.go
@@ -22,7 +22,7 @@ import (
"github.com/stretchr/testify/assert"
- "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation"
+ "github.com/kubewharf/kubeadmiral/pkg/util/annotation"
)
func TestCopySubmap(t *testing.T) {
From fa9942071662eca388175c5545cb1b0031d2f9ee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?=
Date: Thu, 27 Jul 2023 05:09:45 +0000
Subject: [PATCH 147/173] fix(sync): nil pointer dereference
---
 pkg/controllers/sync/dispatch/managed.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pkg/controllers/sync/dispatch/managed.go b/pkg/controllers/sync/dispatch/managed.go
index 7f7d9887..d3412cfe 100644
--- a/pkg/controllers/sync/dispatch/managed.go
+++ b/pkg/controllers/sync/dispatch/managed.go
@@ -173,11 +173,11 @@ func (d *managedDispatcherImpl) Create(ctx context.Context, clusterName string)
defer cancel()
keyedLogger.V(1).Info("Creating target object in cluster")
- obj, err = client.Resource(d.fedResource.TargetGVR()).Namespace(obj.GetNamespace()).Create(
+ createdObj, err := client.Resource(d.fedResource.TargetGVR()).Namespace(obj.GetNamespace()).Create(
ctxWithTimeout, obj, metav1.CreateOptions{},
)
if err == nil {
- version := propagatedversion.ObjectVersion(obj)
+ version := propagatedversion.ObjectVersion(createdObj)
d.recordVersion(clusterName, version)
return true
}
From fd3e825053aac67340df89957200e6bf738255ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?=
Date: Thu, 27 Jul 2023 05:10:04 +0000
Subject: [PATCH 148/173] fix(override): nil pointer dereference
---
 pkg/controllers/override/util.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/controllers/override/util.go b/pkg/controllers/override/util.go
index 485ed9cc..fe11ad16 100644
--- a/pkg/controllers/override/util.go
+++ b/pkg/controllers/override/util.go
@@ -80,7 +80,7 @@ func lookForMatchedPolicies(
return nil, true, err
}
if errors.IsNotFound(err) {
- return nil, false, fmt.Errorf("OverridePolicy %s/%s not found", matchedPolicy.Namespace, matchedPolicy.Name)
+ return nil, false, fmt.Errorf("OverridePolicy %s/%s not found", obj.GetNamespace(), policyName)
}
policies = append(policies, matchedPolicy)
}
From a1afa06480606b7d745cac18734bb53f5e19cdd6 Mon Sep 17 00:00:00 2001
From:
=?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 05:17:35 +0000 Subject: [PATCH 149/173] fix(sync): retain replicas test --- pkg/controllers/sync/dispatch/managed.go | 2 +- pkg/controllers/sync/dispatch/retain.go | 7 +++---- pkg/controllers/sync/dispatch/retain_test.go | 21 +++++++++----------- 3 files changed, 13 insertions(+), 17 deletions(-) diff --git a/pkg/controllers/sync/dispatch/managed.go b/pkg/controllers/sync/dispatch/managed.go index d3412cfe..770766bb 100644 --- a/pkg/controllers/sync/dispatch/managed.go +++ b/pkg/controllers/sync/dispatch/managed.go @@ -259,7 +259,7 @@ func (d *managedDispatcherImpl) Update(ctx context.Context, clusterName string, return d.recordOperationError(ctx, fedcorev1a1.FieldRetentionFailed, clusterName, op, wrappedErr) } - err = retainReplicas(obj, clusterObj, d.fedResource.Object(), d.fedResource.TypeConfig()) + err = retainReplicas(obj, clusterObj, d.fedResource.Object(), d.fedResource.TypeConfig().Spec.PathDefinition.ReplicasSpec) if err != nil { wrappedErr := errors.Wrapf(err, "failed to retain replicas") return d.recordOperationError(ctx, fedcorev1a1.FieldRetentionFailed, clusterName, op, wrappedErr) diff --git a/pkg/controllers/sync/dispatch/retain.go b/pkg/controllers/sync/dispatch/retain.go index a7391a77..78a722b9 100644 --- a/pkg/controllers/sync/dispatch/retain.go +++ b/pkg/controllers/sync/dispatch/retain.go @@ -30,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" utilunstructured "github.com/kubewharf/kubeadmiral/pkg/util/unstructured" ) @@ -526,20 +525,20 @@ func checkRetainReplicas(fedObj metav1.Object) bool { return fedObj.GetAnnotations()[common.RetainReplicasAnnotation] == common.AnnotationValueTrue } -func retainReplicas(desiredObj, clusterObj *unstructured.Unstructured, fedObj metav1.Object, typeConfig *fedcorev1a1.FederatedTypeConfig) error { +func retainReplicas(desiredObj, clusterObj *unstructured.Unstructured, fedObj metav1.Object, replicasPath string) error { // Retain the replicas field if the federated object has been // configured to do so. If the replicas field is intended to be // set by the in-cluster HPA controller, not retaining it will // thrash the scheduler. 
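	// For intuition, the path-based helpers below behave much like the
	// k8s.io unstructured accessors. A rough illustrative equivalent for a
	// plain dot-separated path such as "spec.replicas" (a sketch only; the
	// real helpers take an extra final argument, passed as nil in this code):
	//
	//	fields := strings.Split(replicasPath, ".")
	//	replicas, found, err := unstructured.NestedInt64(clusterObj.Object, fields...)
	//	if err == nil && found {
	//		err = unstructured.SetNestedField(desiredObj.Object, replicas, fields...)
	//	}
	//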
retain := checkRetainReplicas(fedObj) if retain { - replicas, err := utilunstructured.GetInt64FromPath(clusterObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) + replicas, err := utilunstructured.GetInt64FromPath(clusterObj, replicasPath, nil) if err != nil { return err } if replicas != nil { - if err := utilunstructured.SetInt64FromPath(desiredObj, typeConfig.Spec.PathDefinition.ReplicasSpec, replicas, nil); err != nil { + if err := utilunstructured.SetInt64FromPath(desiredObj, replicasPath, replicas, nil); err != nil { return err } } diff --git a/pkg/controllers/sync/dispatch/retain_test.go b/pkg/controllers/sync/dispatch/retain_test.go index b18b624a..6e9bbf51 100644 --- a/pkg/controllers/sync/dispatch/retain_test.go +++ b/pkg/controllers/sync/dispatch/retain_test.go @@ -24,10 +24,12 @@ import ( "testing" "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) func TestRetainClusterFields(t *testing.T) { @@ -67,20 +69,15 @@ func TestRetainClusterFields(t *testing.T) { }, }, } - fedObj := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]interface{}{ - "retainReplicas": testCase.retainReplicas, - }, + fedObj := &fedcorev1a1.FederatedObject{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: make(map[string]string), }, } - if err := retainReplicas(desiredObj, clusterObj, fedObj, &fedcorev1a1.FederatedTypeConfig{ - Spec: fedcorev1a1.FederatedTypeConfigSpec{ - PathDefinition: fedcorev1a1.PathDefinition{ - ReplicasSpec: "spec.replicas", - }, - }, - }); err != nil { + if testCase.retainReplicas { + fedObj.GetAnnotations()[common.RetainReplicasAnnotation] = common.AnnotationValueTrue + } + if err := retainReplicas(desiredObj, clusterObj, fedObj, "spec.replicas"); err != nil { t.Fatalf("Unexpected error: %v", err) } From 46894c6ac5456643b0ab8892868900074c63891b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 05:20:26 +0000 Subject: [PATCH 150/173] fix(overrides): tests --- pkg/controllers/override/util_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controllers/override/util_test.go b/pkg/controllers/override/util_test.go index 24c9919b..21602b4a 100644 --- a/pkg/controllers/override/util_test.go +++ b/pkg/controllers/override/util_test.go @@ -413,7 +413,7 @@ func TestParseOverrides(t *testing.T) { Operator: "replace", Path: "/aa/bb", Value: apiextensionsv1.JSON{ - Raw: []byte(`["banana", "mango"]`), + Raw: []byte(`["banana","mango"]`), }, }, }, @@ -516,7 +516,7 @@ func TestParseOverrides(t *testing.T) { Operator: "replace", Path: "/aa/bb", Value: apiextensionsv1.JSON{ - Raw: []byte(`["banana", "mango"]`), + Raw: []byte(`["banana","mango"]`), }, }, }, @@ -541,7 +541,7 @@ func TestParseOverrides(t *testing.T) { Operator: "replace", Path: "/cc/dd", Value: apiextensionsv1.JSON{ - Raw: []byte(`{"key": "value"}`), + Raw: []byte(`{"key":"value"}`), }, }, }, From 1f1a6e8ccdb728b6ae459f4adadd6e1d7055404e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Thu, 27 Jul 2023 11:01:25 +0000 Subject: [PATCH 151/173] chore: remove remnants of KubeFed in comments --- .../crds/core.kubeadmiral.io_clusterpropagatedversions.yaml | 2 +- config/crds/core.kubeadmiral.io_propagatedversions.yaml | 2 +- 
pkg/apis/core/v1alpha1/types_propgatedversion.go | 2 +- pkg/controllers/sync/resource.go | 6 +++--- pkg/controllers/util/resourceinformer.go | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml b/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml index b9d3601d..ad9ae214 100644 --- a/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml +++ b/config/crds/core.kubeadmiral.io_clusterpropagatedversions.yaml @@ -40,7 +40,7 @@ spec: description: The name of the cluster the version is for. type: string version: - description: The last version produced for the resource by a KubeFed operation. + description: The last version produced for the resource by a KubeAdmiral operation. type: string required: - clusterName diff --git a/config/crds/core.kubeadmiral.io_propagatedversions.yaml b/config/crds/core.kubeadmiral.io_propagatedversions.yaml index c638a9ed..a5d23dbb 100644 --- a/config/crds/core.kubeadmiral.io_propagatedversions.yaml +++ b/config/crds/core.kubeadmiral.io_propagatedversions.yaml @@ -40,7 +40,7 @@ spec: description: The name of the cluster the version is for. type: string version: - description: The last version produced for the resource by a KubeFed operation. + description: The last version produced for the resource by a KubeAdmiral operation. type: string required: - clusterName diff --git a/pkg/apis/core/v1alpha1/types_propgatedversion.go b/pkg/apis/core/v1alpha1/types_propgatedversion.go index 04d1f5b8..7851eb1f 100644 --- a/pkg/apis/core/v1alpha1/types_propgatedversion.go +++ b/pkg/apis/core/v1alpha1/types_propgatedversion.go @@ -69,7 +69,7 @@ type PropagatedVersionStatus struct { type ClusterObjectVersion struct { // The name of the cluster the version is for. ClusterName string `json:"clusterName"` - // The last version produced for the resource by a KubeFed + // The last version produced for the resource by a KubeAdmiral // operation. Version string `json:"version"` } diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 9f7077d9..87a70dce 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -47,7 +47,7 @@ import ( // FederatedResource encapsulates the behavior of a logical federated // resource which may be implemented by one or more kubernetes -// resources in the cluster hosting the KubeFed control plane. +// resources in the cluster hosting the control plane. type FederatedResource interface { dispatch.FederatedResourceForDispatch @@ -247,9 +247,9 @@ func (r *federatedResource) ApplyOverrides( } } - // Ensure that resources managed by KubeFed always have the + // Ensure that resources managed by us always have the // managed label. The label is intended to be targeted by all the - // KubeFed controllers. + // KubeAdmiral controllers. managedlabel.AddManagedLabel(obj) return nil diff --git a/pkg/controllers/util/resourceinformer.go b/pkg/controllers/util/resourceinformer.go index bffaead1..77e8a1a2 100644 --- a/pkg/controllers/util/resourceinformer.go +++ b/pkg/controllers/util/resourceinformer.go @@ -68,7 +68,7 @@ func NewResourceInformerWithEventHandler( } // NewManagedResourceInformer returns an informer limited to resources -// managed by KubeFed as indicated by labeling. +// managed by KubeAdmiral as indicated by labeling. 
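// For reference, "indicated by labeling" boils down to restricting the
// informer's list/watch with a label selector. A minimal sketch (the exact
// label key lives in pkg/util/managedlabel; the literal below is an
// assumption, not a verbatim copy):
//
//	options.LabelSelector = labels.Set{"kubeadmiral.io/managed": "true"}.AsSelector().String()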
func NewManagedResourceInformer( client ResourceClient, namespace string, From 0ffcd277e70a91f1a173b70a924c755e5208bf62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 02:43:04 +0000 Subject: [PATCH 152/173] fix: import grouping --- pkg/controllers/override/util_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/controllers/override/util_test.go b/pkg/controllers/override/util_test.go index 21602b4a..9a9c61c5 100644 --- a/pkg/controllers/override/util_test.go +++ b/pkg/controllers/override/util_test.go @@ -21,12 +21,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" jsonutil "k8s.io/apimachinery/pkg/util/json" "k8s.io/client-go/tools/cache" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1listers "github.com/kubewharf/kubeadmiral/pkg/client/listers/core/v1alpha1" ) From 0939795cc86de51394a9ecc36cfe9f58b20d2bf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 02:50:51 +0000 Subject: [PATCH 153/173] fix(versionmanager): remove redundant ok check --- pkg/controllers/sync/version/manager.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index 74056c15..ac305b19 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -108,10 +108,7 @@ func (m *VersionManager) Sync(ctx context.Context) { if !ok { return } - ok = m.load(ctx, versionList) - if !ok { - return - } + m.load(ctx, versionList) } // HasSynced indicates whether the manager's in-memory state has been @@ -258,6 +255,7 @@ func (m *VersionManager) list(ctx context.Context) (runtimeclient.ObjectList, bo func (m *VersionManager) load(ctx context.Context, versionList runtimeclient.ObjectList) bool { objs, err := meta.ExtractList(versionList) if err != nil { + m.logger.Error(err, "Failed to extract version list") return false } for _, obj := range objs { From e42029d263a8093df1b26cdbe26d0debcc7e0360 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 02:55:20 +0000 Subject: [PATCH 154/173] chore: add comments to VersionManager --- pkg/controllers/sync/version/manager.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index ac305b19..e7c12824 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -51,6 +51,26 @@ type VersionedResource interface { FederatedGVK() schema.GroupVersionKind } +/* +VersionManager is used by the Sync controller to record the last synced version +of a FederatedObject along with the versions of the cluster objects that were +created/updated in the process. This is important in preventing unnecessary +update requests from being sent to member clusters in subsequent reconciles. The +VersionManager persists this information in the apiserver in the form of +PropagatedVersion/ClusterPropagatedVersions, see +pkg/apis/types_propagatedversion.go. 
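For a rough picture, a persisted version record pairs the hashes of the
federated object's template and overrides with a per-cluster version map,
along these lines (illustrative YAML; the field names approximate
types_propagatedversion.go rather than quoting it):

	status:
	  templateVersion: "<hash of the template>"
	  overridesVersion: "<hash of the overrides>"
	  clusterVersions:
	  - clusterName: member-1
	    version: "3" # Generation or ResourceVersion of the cluster object, as explained below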
+ +In the context of the Sync controller, we identify the "version" of a FederatedObject with the hash of its template and overrides and we identify the "version" of a cluster object to be either its Generation (if available) or its ResourceVersion. + +VersionManager is required because created/updated cluster objects might not match the template exactly due to various reasons such as default values, admission plugins or webhooks. Thus we have to store the version returned by the create/update request to avoid false positives when determining if the cluster object has diverged from the template in subsequent reconciles. +*/ type VersionManager struct { sync.RWMutex From da2e3ca9480dba681174232885ba8ef44a9ab808 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 02:58:37 +0000 Subject: [PATCH 155/173] chore(versionmanager): add comments to Get --- pkg/controllers/sync/version/manager.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index e7c12824..1bc3ac54 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -139,8 +139,9 @@ func (m *VersionManager) HasSynced() bool { return m.hasSynced } -// Get retrieves a mapping of cluster names to versions for the given -// versioned resource. +// Get retrieves a mapping of cluster names to versions for the given versioned +// resource. It returns an empty map if the desired object for the versioned +// resource differs from the one last recorded. func (m *VersionManager) Get(resource VersionedResource) (map[string]string, error) { versionMap := make(map[string]string) From fae54e5d68648324c083285ac6dc90f34a77df33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 02:59:49 +0000 Subject: [PATCH 156/173] chore: FederatedResource comments --- pkg/controllers/sync/resource.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 87a70dce..168e0ffc 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -45,9 +45,9 @@ import ( overridesutil "github.com/kubewharf/kubeadmiral/pkg/util/overrides" ) -// FederatedResource encapsulates the behavior of a logical federated -// resource which may be implemented by one or more kubernetes -// resources in the cluster hosting the control plane. +// FederatedResource is a wrapper for FederatedObjects and +// ClusterFederatedObjects that provides necessary abstractions for the Sync +// controller to propagate them to selected member clusters. type FederatedResource interface { dispatch.FederatedResourceForDispatch From efb2538b0727b0f95a235788e774564569bf0729 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 03:01:31 +0000 Subject: [PATCH 157/173] refactor(sync): FederatedResource interface implements VersionedResource --- pkg/controllers/sync/resource.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 168e0ffc..440ef3b2 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -50,6 +50,7 @@ import ( // controller to propagate them to selected member clusters.
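//
// Since the interface also embeds version.VersionedResource (added in the
// hunk below), a FederatedResource can be handed straight to the
// VersionManager, e.g. (a sketch; the versionManager field name is an
// assumption):
//
//	versionMap, err := s.versionManager.Get(fedResource)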
type FederatedResource interface { dispatch.FederatedResourceForDispatch + version.VersionedResource FederatedName() common.QualifiedName UpdateVersions(selectedClusters []string, versionMap map[string]string) error From 9aba3b8d137c76f0bcefccbbd58a78d2c991afd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 03:16:55 +0000 Subject: [PATCH 158/173] chore(sync): add comments to FederatedResource methods --- pkg/controllers/sync/dispatch/managed.go | 12 +++++++++++- pkg/controllers/sync/resource.go | 5 +++++ pkg/controllers/sync/version/manager.go | 8 ++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/pkg/controllers/sync/dispatch/managed.go b/pkg/controllers/sync/dispatch/managed.go index 770766bb..0a50eba6 100644 --- a/pkg/controllers/sync/dispatch/managed.go +++ b/pkg/controllers/sync/dispatch/managed.go @@ -50,16 +50,26 @@ const IndexRolloutPlans = "federation_placement_rollout" // FederatedResourceForDispatch is the subset of the FederatedResource // interface required for dispatching operations to managed resources. type FederatedResourceForDispatch interface { + // TargetName returns the name of the resource's target object. TargetName() common.QualifiedName + // TargetGVK returns the resource's target group/version/kind. TargetGVK() schema.GroupVersionKind + // TargetGVR returns the resource's target group/version/resource. TargetGVR() schema.GroupVersionResource + // TypeConfig returns the FederatedTypeConfig for the resource's target type. TypeConfig() *fedcorev1a1.FederatedTypeConfig - // Object returns the federated object. + // Object returns the underlying FederatedObject or ClusterFederatedObject + // as a GenericFederatedObject. Object() fedcorev1a1.GenericFederatedObject + // VersionForCluster returns the resource's last propagated version for the given cluster. VersionForCluster(clusterName string) (string, error) + // ObjectForCluster returns the resource's desired object for the given cluster. ObjectForCluster(clusterName string) (*unstructured.Unstructured, error) + // ApplyOverrides applies cluster-specific overrides to the given object. ApplyOverrides(obj *unstructured.Unstructured, clusterName string) error + // RecordError records an error for the resource. RecordError(errorCode string, err error) + // RecordEvent records an event for the resource. RecordEvent(reason, messageFmt string, args ...interface{}) } diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 440ef3b2..1ebdae69 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -52,10 +52,15 @@ type FederatedResource interface { dispatch.FederatedResourceForDispatch version.VersionedResource + // FederatedName returns the name of the underlying FederatedObject or ClusterFederatedObject. FederatedName() common.QualifiedName + // UpdateVersions updates the recorded versions for the given clusters. UpdateVersions(selectedClusters []string, versionMap map[string]string) error + // DeleteVersions deletes the recorded versions. DeleteVersions() + // ComputePlacement computes the placement of the resource in the given clusters. ComputePlacement(clusters []*fedcorev1a1.FederatedCluster) sets.Set[string] + // SetObject sets the underlying FederatedObject or ClusterFederatedObject. 
SetObject(obj fedcorev1a1.GenericFederatedObject) } diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index 1bc3ac54..af780fe3 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -44,10 +44,18 @@ import ( // VersionedResource defines the methods a federated resource must // implement to allow versions to be tracked by the VersionManager. type VersionedResource interface { + // FederatedName returns the qualified name of the underlying + // FederatedObject or ClusterFederatedObject. FederatedName() common.QualifiedName + // Object returns the underlying FederatedObject or ClusterFederatedObject + // as a GenericFederatedObject. Object() fedcorev1a1.GenericFederatedObject + // TemplateVersion returns the resource's current template version. TemplateVersion() (string, error) + // OverrideVersion returns the resource's current override version. OverrideVersion() (string, error) + // FederatedGVK returns the GroupVersionKind of the underlying + // FederatedObject or ClusterFederatedObject. FederatedGVK() schema.GroupVersionKind } From d94304e1c46971944c67913bc56e696ba4ba97ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 03:35:46 +0000 Subject: [PATCH 159/173] chore(sync): update FederatedResourceAccessor comments --- pkg/controllers/sync/accessor.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index 3e38085e..c541cd5b 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -42,7 +42,8 @@ import ( ) // FederatedResourceAccessor provides a way to retrieve and visit -// logical federated resources (e.g. FederatedConfigMap) +// FederatedResource representations of FederatedObject/ClusterFederatedObject +// in the apiserver. type FederatedResourceAccessor interface { Run(context.Context) HasSynced() bool From 1c35b06fd0422c9919f7f4fb7d55b9c537e2dab5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 03:49:43 +0000 Subject: [PATCH 160/173] chore: remove unused pkg/util/meta functions and tests --- pkg/util/meta/meta.go | 115 ----------------------- pkg/util/meta/meta_test.go | 183 +++++++++++-------------------------- 2 files changed, 53 insertions(+), 245 deletions(-) diff --git a/pkg/util/meta/meta.go b/pkg/util/meta/meta.go index 3628a220..d77f09c9 100644 --- a/pkg/util/meta/meta.go +++ b/pkg/util/meta/meta.go @@ -21,69 +21,12 @@ are Copyright 2023 The KubeAdmiral Authors. package meta import ( - "encoding/json" "reflect" - "strings" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" pkgruntime "k8s.io/apimachinery/pkg/runtime" ) -// Copies cluster-independent, user provided data from the given ObjectMeta struct. If in -// the future the ObjectMeta structure is expanded then any field that is not populated -// by the api server should be included here. -func copyObjectMeta(obj metav1.ObjectMeta) metav1.ObjectMeta { - return metav1.ObjectMeta{ - Name: obj.Name, - Namespace: obj.Namespace, - Labels: obj.Labels, - Annotations: obj.Annotations, - ResourceVersion: obj.ResourceVersion, - } -} - -// Deep copies cluster-independent, user provided data from the given ObjectMeta struct. 
If in -// the future the ObjectMeta structure is expanded then any field that is not populated -// by the api server should be included here. -func DeepCopyRelevantObjectMeta(obj metav1.ObjectMeta) metav1.ObjectMeta { - copyMeta := copyObjectMeta(obj) - if obj.Labels != nil { - copyMeta.Labels = make(map[string]string) - for key, val := range obj.Labels { - copyMeta.Labels[key] = val - } - } - if obj.Annotations != nil { - copyMeta.Annotations = make(map[string]string) - for key, val := range obj.Annotations { - copyMeta.Annotations[key] = val - } - } - return copyMeta -} - -// Checks if cluster-independent, user provided data in two given ObjectMeta are equal. If in -// the future the ObjectMeta structure is expanded then any field that is not populated -// by the api server should be included here. -func ObjectMetaEquivalent(a, b metav1.ObjectMeta) bool { - if a.Name != b.Name { - return false - } - if a.Namespace != b.Namespace { - return false - } - if !reflect.DeepEqual(a.Labels, b.Labels) && (len(a.Labels) != 0 || len(b.Labels) != 0) { - return false - } - if !reflect.DeepEqual(a.Annotations, b.Annotations) && (len(a.Annotations) != 0 || len(b.Annotations) != 0) { - return false - } - return true -} - // Checks if cluster-independent, user provided data in two given ObjectMeta are equal. If in // the future the ObjectMeta structure is expanded then any field that is not populated // by the api server should be included here. @@ -107,40 +50,6 @@ func ObjectMetaObjEquivalent(a, b metav1.Object) bool { return true } -// Checks if cluster-independent, user provided data in ObjectMeta and Spec in two given top -// level api objects are equivalent. -func ObjectMetaAndSpecEquivalent(a, b pkgruntime.Object) bool { - objectMetaA := reflect.ValueOf(a).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta) - objectMetaB := reflect.ValueOf(b).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta) - specA := reflect.ValueOf(a).Elem().FieldByName("Spec").Interface() - specB := reflect.ValueOf(b).Elem().FieldByName("Spec").Interface() - return ObjectMetaEquivalent(objectMetaA, objectMetaB) && reflect.DeepEqual(specA, specB) -} - -func MetaAccessor(obj pkgruntime.Object) metav1.Object { - accessor, err := meta.Accessor(obj) - if err != nil { - // This should always succeed if obj is not nil. Also, - // adapters are slated for replacement by unstructured. 
- return nil - } - return accessor -} - -// GetUnstructured return Unstructured for any given kubernetes type -func GetUnstructured(resource interface{}) (*unstructured.Unstructured, error) { - content, err := json.Marshal(resource) - if err != nil { - return nil, errors.Wrap(err, "Failed to JSON Marshal") - } - unstructuredResource := &unstructured.Unstructured{} - err = unstructuredResource.UnmarshalJSON(content) - if err != nil { - return nil, errors.Wrap(err, "Failed to UnmarshalJSON into unstructured content") - } - return unstructuredResource, nil -} - func GetResourceKind(obj pkgruntime.Object) string { t := reflect.TypeOf(obj) if t.Kind() != reflect.Ptr { @@ -150,27 +59,3 @@ func GetResourceKind(obj pkgruntime.Object) string { t = t.Elem() return t.Name() } - -func GetPluralName(name string) string { - return strings.ToLower(name) + "s" -} - -// ConvertViaJson converts sourceObj to targetObj via json -func ConvertViaJson(sourceObj interface{}, targetObj interface{}) error { - content, err := json.Marshal(sourceObj) - if err != nil { - return err - } - if err := json.Unmarshal(content, targetObj); err != nil { - return err - } - return nil -} - -func GetUnstructuredStatus(typedStatus interface{}) (map[string]interface{}, error) { - unstructuredStatus := map[string]interface{}{} - if err := ConvertViaJson(typedStatus, &unstructuredStatus); err != nil { - return nil, err - } - return unstructuredStatus, nil -} diff --git a/pkg/util/meta/meta_test.go b/pkg/util/meta/meta_test.go index 2eedc6c0..a172acdf 100644 --- a/pkg/util/meta/meta_test.go +++ b/pkg/util/meta/meta_test.go @@ -24,152 +24,75 @@ import ( "testing" "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) -func TestObjectMeta(t *testing.T) { - o1 := metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - UID: "1231231412", - ResourceVersion: "999", - } - o2 := copyObjectMeta(o1) - o3 := metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - UID: "1231231412", - Annotations: map[string]string{"A": "B"}, - } - o4 := metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - UID: "1231255531412", - Annotations: map[string]string{"A": "B"}, - } - o5 := metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - ResourceVersion: "1231231412", - Annotations: map[string]string{"A": "B"}, - } - o6 := metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - ResourceVersion: "1231255531412", - Annotations: map[string]string{"A": "B"}, - } - o7 := metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - ResourceVersion: "1231255531412", - Annotations: map[string]string{}, - Labels: map[string]string{}, - } - o8 := metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - ResourceVersion: "1231255531412", - } - assert.Equal(t, 0, len(o2.UID)) - assert.Equal(t, 3, len(o2.ResourceVersion)) - assert.Equal(t, o1.Name, o2.Name) - assert.True(t, ObjectMetaEquivalent(o1, o2)) - assert.False(t, ObjectMetaEquivalent(o1, o3)) - assert.True(t, ObjectMetaEquivalent(o3, o4)) - assert.True(t, ObjectMetaEquivalent(o5, o6)) - assert.True(t, ObjectMetaEquivalent(o3, o5)) - assert.True(t, ObjectMetaEquivalent(o7, o8)) - assert.True(t, ObjectMetaEquivalent(o8, o7)) -} - -func TestObjectMetaAndSpec(t *testing.T) { - s1 := corev1.Service{ +func TestObjectMetaObjEquivalent(t *testing.T) { + o1 := &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - }, - Spec: corev1.ServiceSpec{ - 
ExternalName: "Service1", + Namespace: "ns1", + Name: "s1", + UID: "1231231412", + ResourceVersion: "999", }, } - s1b := s1 - s2 := corev1.Service{ + o2 := o1 + o3 := &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s2", - }, - Spec: corev1.ServiceSpec{ - ExternalName: "Service1", + Namespace: "ns1", + Name: "s1", + UID: "1231231412", + Annotations: map[string]string{"A": "B"}, }, } - s3 := corev1.Service{ + o4 := &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns1", - Name: "s1", - }, - Spec: corev1.ServiceSpec{ - ExternalName: "Service2", + Namespace: "ns1", + Name: "s1", + UID: "1231255531412", + Annotations: map[string]string{"A": "B"}, }, } - assert.True(t, ObjectMetaAndSpecEquivalent(&s1, &s1b)) - assert.False(t, ObjectMetaAndSpecEquivalent(&s1, &s2)) - assert.False(t, ObjectMetaAndSpecEquivalent(&s1, &s3)) - assert.False(t, ObjectMetaAndSpecEquivalent(&s2, &s3)) -} - -func TestConvertViaJson(t *testing.T) { - var replicas int32 = 10 - typedObj := &appsv1.Deployment{ + o5 := &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ - Namespace: "test", - Name: "foo", + Namespace: "ns1", + Name: "s1", + ResourceVersion: "1231231412", + Annotations: map[string]string{"A": "B"}, }, - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "appsv1", - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - }, - Status: appsv1.DeploymentStatus{ - Replicas: 10, - UpdatedReplicas: 5, - ReadyReplicas: 5, + } + o6 := &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "s1", + ResourceVersion: "1231255531412", + Annotations: map[string]string{"A": "B"}, }, } - - unstrunctruedObj := &unstructured.Unstructured{} - assert.NoError(t, ConvertViaJson(typedObj, unstrunctruedObj)) - - convertedTyped := &appsv1.Deployment{} - assert.NoError(t, ConvertViaJson(unstrunctruedObj, convertedTyped)) - assert.Equal(t, typedObj, convertedTyped) - - status := appsv1.DeploymentStatus{ - Replicas: 5, - UpdatedReplicas: 6, - UnavailableReplicas: 7, + o7 := &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "s1", + ResourceVersion: "1231255531412", + Annotations: map[string]string{}, + Labels: map[string]string{}, + }, } - unstrucedStatus, err := GetUnstructuredStatus(status) - assert.NoError(t, err) - unstrucedStatus["foo"] = "bar" -} - -func TestGetUnstructuredStatus(t *testing.T) { - status := appsv1.DeploymentStatus{ - Replicas: 15, - UpdatedReplicas: 6, - UnavailableReplicas: 7, + o8 := &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "s1", + ResourceVersion: "1231255531412", + }, } - unstrucedStatus, err := GetUnstructuredStatus(status) - assert.NoError(t, err) - assert.Equal(t, map[string]interface{}{ - "replicas": interface{}(float64(15)), - "updatedReplicas": interface{}(float64(6)), - "unavailableReplicas": interface{}(float64(7)), - }, unstrucedStatus) + assert.Equal(t, 0, len(o2.UID)) + assert.Equal(t, 3, len(o2.ResourceVersion)) + assert.Equal(t, o1.Name, o2.Name) + assert.True(t, ObjectMetaObjEquivalent(o1, o2)) + assert.False(t, ObjectMetaObjEquivalent(o1, o3)) + assert.True(t, ObjectMetaObjEquivalent(o3, o4)) + assert.True(t, ObjectMetaObjEquivalent(o5, o6)) + assert.True(t, ObjectMetaObjEquivalent(o3, o5)) + assert.True(t, ObjectMetaObjEquivalent(o7, o8)) + assert.True(t, ObjectMetaObjEquivalent(o8, o7)) } From f26cf0fb4c63cda8ea8a4e12784909bc55873b7f Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 03:53:58 +0000 Subject: [PATCH 161/173] refactor: move pkg/util/propagatedversion to pkg/controllers/sync/propagatedversion --- pkg/controllers/sync/dispatch/managed.go | 2 +- .../sync}/propagatedversion/propagatedversion.go | 0 pkg/controllers/sync/version/manager.go | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename pkg/{util => controllers/sync}/propagatedversion/propagatedversion.go (100%) diff --git a/pkg/controllers/sync/dispatch/managed.go b/pkg/controllers/sync/dispatch/managed.go index 0a50eba6..c539c98b 100644 --- a/pkg/controllers/sync/dispatch/managed.go +++ b/pkg/controllers/sync/dispatch/managed.go @@ -38,11 +38,11 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/propagatedversion" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/status" "github.com/kubewharf/kubeadmiral/pkg/stats" "github.com/kubewharf/kubeadmiral/pkg/util/adoption" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" - "github.com/kubewharf/kubeadmiral/pkg/util/propagatedversion" ) const IndexRolloutPlans = "federation_placement_rollout" diff --git a/pkg/util/propagatedversion/propagatedversion.go b/pkg/controllers/sync/propagatedversion/propagatedversion.go similarity index 100% rename from pkg/util/propagatedversion/propagatedversion.go rename to pkg/controllers/sync/propagatedversion/propagatedversion.go diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index af780fe3..035b65c1 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -38,7 +38,7 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedcorev1a1client "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned/typed/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/util/propagatedversion" + "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/propagatedversion" ) // VersionedResource defines the methods a federated resource must From a37b47f818caf380e772e5b0c82d5c63c5bca20a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 03:55:32 +0000 Subject: [PATCH 162/173] chore(sync): fix import formatting --- pkg/controllers/sync/controller.go | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 21a302ee..7079b12f 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -46,26 +46,25 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/dispatch" + "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/status" + "github.com/kubewharf/kubeadmiral/pkg/stats" "github.com/kubewharf/kubeadmiral/pkg/util/adoption" "github.com/kubewharf/kubeadmiral/pkg/util/cascadingdeletion" clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" 
"github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + finalizersutil "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" "github.com/kubewharf/kubeadmiral/pkg/util/naming" "github.com/kubewharf/kubeadmiral/pkg/util/orphaning" - - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/dispatch" - "github.com/kubewharf/kubeadmiral/pkg/controllers/sync/status" - "github.com/kubewharf/kubeadmiral/pkg/stats" - "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" - finalizersutil "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" - "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" "github.com/kubewharf/kubeadmiral/pkg/util/pendingcontrollers" "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) From 3f62427913f1879b5b02b28b1aab0ec72a10b0c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 04:52:55 +0000 Subject: [PATCH 163/173] refactor(sync): avoid redundant get ftc from ftcmanager --- pkg/controllers/sync/controller.go | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 7079b12f..a755c456 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -693,15 +693,14 @@ func (s *SyncController) deleteFromCluster( } func (s *SyncController) deleteFromClusters(ctx context.Context, fedResource FederatedResource) (bool, error) { - gvk := fedResource.TargetGVK() - qualifiedName := fedResource.TargetName() keyedLogger := klog.FromContext(ctx) remainingClusters := []string{} ok, err := s.handleDeletionInClusters( ctx, - gvk, - qualifiedName, + fedResource.TargetGVK(), + fedResource.TargetGVR(), + fedResource.TargetName(), func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured) { remainingClusters = append(remainingClusters, clusterName) s.deleteFromCluster(ctx, dispatcher, clusterName, fedResource, clusterObj, true) @@ -769,6 +768,7 @@ func (s *SyncController) ensureRemovedOrUnmanaged(ctx context.Context, fedResour func (s *SyncController) handleDeletionInClusters( ctx context.Context, targetGVK schema.GroupVersionKind, + targetGVR schema.GroupVersionResource, targetQualifiedName common.QualifiedName, deletionFunc func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured), ) (bool, error) { @@ -779,12 +779,7 @@ func (s *SyncController) handleDeletionInClusters( return false, fmt.Errorf("failed to get a list of clusters: %w", err) } - ftc, exists := s.ftcManager.GetResourceFTC(targetGVK) - if !exists { - return false, fmt.Errorf("FTC does not exist") - } - - dispatcher := dispatch.NewUnmanagedDispatcher(s.getClusterClient, ftc.GetSourceTypeGVR(), targetQualifiedName) + dispatcher := dispatch.NewUnmanagedDispatcher(s.getClusterClient, targetGVR, targetQualifiedName) retrievalFailureClusters := []string{} unreadyClusters := []string{} for _, cluster := range clusters { 
From cb1393d14747796728a9c29ec342e7bbbd956799 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 05:01:59 +0000 Subject: [PATCH 164/173] refactor(sync): rename methods for clarity --- pkg/controllers/sync/controller.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index a755c456..58d033de 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -396,7 +396,7 @@ func (s *SyncController) reconcile(ctx context.Context, federatedName common.Qua }() if fedResource.Object().GetDeletionTimestamp() != nil { - return s.ensureDeletion(ctx, fedResource) + return s.handleTerminatingFederatedResource(ctx, fedResource) } pendingControllers, err := pendingcontrollers.GetPendingControllers(fedResource.Object()) @@ -507,7 +507,7 @@ func (s *SyncController) syncToClusters(ctx context.Context, fedResource Federat } // We only respect orphaning behavior during cascading deletion, but not while migrating between clusters. - s.deleteFromCluster(ctx, dispatcher, clusterName, fedResource, clusterObj, isCascadingDeletionTriggered) + s.removeFromCluster(ctx, dispatcher, clusterName, fedResource, clusterObj, isCascadingDeletionTriggered) continue } @@ -631,7 +631,7 @@ func (s *SyncController) setFederatedStatus( return worker.StatusAllOK } -func (s *SyncController) ensureDeletion(ctx context.Context, fedResource FederatedResource) worker.Result { +func (s *SyncController) handleTerminatingFederatedResource(ctx context.Context, fedResource FederatedResource) worker.Result { fedResource.DeleteVersions() keyedLogger := klog.FromContext(ctx) @@ -646,7 +646,7 @@ func (s *SyncController) ensureDeletion(ctx context.Context, fedResource Federat } keyedLogger.V(2).Info("Deleting resources managed by this federated object from member clusters") - recheckRequired, err := s.deleteFromClusters(ctx, fedResource) + recheckRequired, err := s.ensureRemovalFromClusters(ctx, fedResource) if err != nil { fedResource.RecordError(string(fedcorev1a1.EnsureDeletionFailed), err) keyedLogger.Error(err, "Failed to ensure deletion of member objects") @@ -665,7 +665,7 @@ func (s *SyncController) ensureDeletion(ctx context.Context, fedResource Federat return worker.StatusAllOK } -func (s *SyncController) deleteFromCluster( +func (s *SyncController) removeFromCluster( ctx context.Context, dispatcher dispatch.UnmanagedDispatcher, clusterName string, @@ -692,7 +692,7 @@ func (s *SyncController) deleteFromCluster( } } -func (s *SyncController) deleteFromClusters(ctx context.Context, fedResource FederatedResource) (bool, error) { +func (s *SyncController) ensureRemovalFromClusters(ctx context.Context, fedResource FederatedResource) (bool, error) { keyedLogger := klog.FromContext(ctx) remainingClusters := []string{} @@ -703,7 +703,7 @@ func (s *SyncController) deleteFromClusters(ctx context.Context, fedResource Fed fedResource.TargetName(), func(dispatcher dispatch.UnmanagedDispatcher, clusterName string, clusterObj *unstructured.Unstructured) { remainingClusters = append(remainingClusters, clusterName) - s.deleteFromCluster(ctx, dispatcher, clusterName, fedResource, clusterObj, true) + s.removeFromCluster(ctx, dispatcher, clusterName, fedResource, clusterObj, true) }, ) if err != nil { @@ -717,7 +717,7 @@ func (s *SyncController) deleteFromClusters(ctx context.Context, fedResource Fed V(2).Info("Waiting for resources managed by this federated 
object to be removed from some clusters") return true, nil } - err = s.ensureRemovedOrUnmanaged(ctx, fedResource) + err = s.checkObjectRemovedFromAllClusters(ctx, fedResource) if err != nil { return false, errors.Wrapf(err, "failed to verify that managed resources no longer exist in any cluster") } @@ -726,12 +726,12 @@ func (s *SyncController) deleteFromClusters(ctx context.Context, fedResource Fed return false, nil } -// ensureRemovedOrUnmanaged ensures that no resources in member +// checkObjectRemovedFromAllClusters checks that no resources in member // clusters that could be managed by the given federated resources are // present or labeled as managed. The checks are performed without // the informer to cover the possibility that the resources have not // yet been cached. -func (s *SyncController) ensureRemovedOrUnmanaged(ctx context.Context, fedResource FederatedResource) error { +func (s *SyncController) checkObjectRemovedFromAllClusters(ctx context.Context, fedResource FederatedResource) error { clusters, err := s.fedInformerManager.GetJoinedClusters() if err != nil { return errors.Wrap(err, "failed to get a list of clusters") From e1f722a45b0c5cd5331e8f5618a31ad38cf0c4ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 05:58:56 +0000 Subject: [PATCH 165/173] fix(scheduler): make it compilable --- pkg/controllers/scheduler/scheduler.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index 4e180b53..90d79710 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -269,11 +269,12 @@ func (s *Scheduler) reconcile(ctx context.Context, key common.QualifiedName) (st fedObject = fedObject.DeepCopyGenericFederatedObject() - sourceGVK, err := fedObject.GetSpec().GetTemplateGVK() + templateMetadata, err := fedObject.GetSpec().GetTemplateMetadata() if err != nil { logger.Error(err, "Failed to get source GVK from FederatedObject") return worker.StatusError } + sourceGVK := templateMetadata.GroupVersionKind() ctx, logger = logging.InjectLoggerValues(ctx, "source-gvk", sourceGVK) ftc, exists := s.informerManager.GetResourceFTC(sourceGVK) @@ -834,12 +835,12 @@ func (s *Scheduler) enqueueFederatedObjectsForFTC(ftc *fedcorev1a1.FederatedType } for _, obj := range allObjects { - sourceGVK, err := obj.GetSpec().GetTemplateGVK() + templateMetadata, err := obj.GetSpec().GetTemplateMetadata() if err != nil { logger.Error(err, "Failed to get source GVK from FederatedObject, will not enqueue") continue } - if sourceGVK == ftc.GetSourceTypeGVK() { + if templateMetadata.GroupVersionKind() == ftc.GetSourceTypeGVK() { s.worker.Enqueue(common.NewQualifiedName(obj)) } } From 3bc1119b0eadd9d0249466864d484372d6027f1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 06:04:41 +0000 Subject: [PATCH 166/173] fix: compile errors --- pkg/controllers/common/types.go | 11 ++--------- pkg/controllers/sync/accessor.go | 5 +---- pkg/controllers/sync/controller.go | 6 ++---- pkg/controllers/sync/version/manager.go | 3 ++- pkg/util/worker/worker.go | 3 --- 5 files changed, 7 insertions(+), 21 deletions(-) diff --git a/pkg/controllers/common/types.go b/pkg/controllers/common/types.go index 210ca55b..dbd60f3d 100644 --- a/pkg/controllers/common/types.go +++ b/pkg/controllers/common/types.go @@ -24,7 +24,6 @@ import ( "fmt" "strings" - 
"k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -40,15 +39,9 @@ type QualifiedName struct { } func NewQualifiedName(obj metav1.Object) QualifiedName { - accessor, err := meta.Accessor(obj) - if err != nil { - // This should never happen, but if it does, the - // resulting empty name. - return QualifiedName{} - } return QualifiedName{ - Namespace: accessor.GetNamespace(), - Name: accessor.GetName(), + Namespace: obj.GetNamespace(), + Name: obj.GetName(), } } diff --git a/pkg/controllers/sync/accessor.go b/pkg/controllers/sync/accessor.go index c541cd5b..c6781cb7 100644 --- a/pkg/controllers/sync/accessor.go +++ b/pkg/controllers/sync/accessor.go @@ -27,7 +27,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" - pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -92,9 +91,7 @@ func NewFederatedResourceAccessor( logger: logger.WithValues("origin", "resource-accessor"), } - handler := eventhandlers.NewTriggerOnAllChanges(func(o pkgruntime.Object) { - enqueue(common.NewQualifiedName(o)) - }) + handler := eventhandlers.NewTriggerOnAllChangesWithTransform(common.NewQualifiedName, enqueue) fedObjectInformer.Informer().AddEventHandler(handler) clusterFedObjectInformer.Informer().AddEventHandler(handler) diff --git a/pkg/controllers/sync/controller.go b/pkg/controllers/sync/controller.go index 58d033de..b29f8c84 100644 --- a/pkg/controllers/sync/controller.go +++ b/pkg/controllers/sync/controller.go @@ -165,7 +165,6 @@ func NewSyncController( s.worker = worker.NewReconcileWorker[common.QualifiedName]( SyncControllerName, - nil, s.reconcile, worker.RateLimiterOptions{}, workerCount, @@ -174,7 +173,6 @@ func NewSyncController( s.clusterCascadingDeletionWorker = worker.NewReconcileWorker[common.QualifiedName]( SyncControllerName+"-cluster-cascading-deletion-worker", - nil, s.reconcileClusterForCascadingDeletion, worker.RateLimiterOptions{}, 1, @@ -338,7 +336,7 @@ func (s *SyncController) IsControllerReady() bool { } func (s *SyncController) getClusterClient(clusterName string) (dynamic.Interface, error) { - if client, exists := s.fedInformerManager.GetClusterClient(clusterName); exists { + if client, exists := s.fedInformerManager.GetClusterDynamicClient(clusterName); exists { return client, nil } return nil, fmt.Errorf("client does not exist for cluster") @@ -990,7 +988,7 @@ func (s *SyncController) reconcileClusterForCascadingDeletion(ctx context.Contex remainingByGVK[gvk] = strconv.Itoa(len(objects)) } } else { - client, exists := s.fedInformerManager.GetClusterClient(cluster.Name) + client, exists := s.fedInformerManager.GetClusterDynamicClient(cluster.Name) if !exists { remainingByGVK[gvk] = "Unknown (cluster client does not exist)" continue diff --git a/pkg/controllers/sync/version/manager.go b/pkg/controllers/sync/version/manager.go index 035b65c1..8ecaf1c5 100644 --- a/pkg/controllers/sync/version/manager.go +++ b/pkg/controllers/sync/version/manager.go @@ -295,8 +295,9 @@ func (m *VersionManager) load(ctx context.Context, versionList runtimeclient.Obj default: } + obj := obj.(runtimeclient.Object) qualifiedName := common.NewQualifiedName(obj) - m.versions[qualifiedName.String()] = obj.(runtimeclient.Object) + m.versions[qualifiedName.String()] = obj } m.Lock() m.hasSynced = true diff --git a/pkg/util/worker/worker.go b/pkg/util/worker/worker.go index 96b6b1de..65ce446d 100644 --- a/pkg/util/worker/worker.go +++ b/pkg/util/worker/worker.go @@ 
-26,7 +26,6 @@ import ( "time" "golang.org/x/time/rate" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" "github.com/kubewharf/kubeadmiral/pkg/stats" @@ -34,8 +33,6 @@ import ( type ReconcileFunc[Key any] func(context.Context, Key) Result -type KeyFunc[Key any] func(metav1.Object) Key - type ReconcileWorker[Key any] interface { Enqueue(key Key) EnqueueWithBackoff(key Key) From 9fcd4eee22d5579c4760718c32dfe2d53dc0958b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 06:08:18 +0000 Subject: [PATCH 167/173] fix(util): broken test --- pkg/util/meta/meta_test.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/util/meta/meta_test.go b/pkg/util/meta/meta_test.go index a172acdf..c02c345f 100644 --- a/pkg/util/meta/meta_test.go +++ b/pkg/util/meta/meta_test.go @@ -85,9 +85,6 @@ func TestObjectMetaObjEquivalent(t *testing.T) { ResourceVersion: "1231255531412", }, } - assert.Equal(t, 0, len(o2.UID)) - assert.Equal(t, 3, len(o2.ResourceVersion)) - assert.Equal(t, o1.Name, o2.Name) assert.True(t, ObjectMetaObjEquivalent(o1, o2)) assert.False(t, ObjectMetaObjEquivalent(o1, o3)) assert.True(t, ObjectMetaObjEquivalent(o3, o4)) assert.True(t, ObjectMetaObjEquivalent(o5, o6)) assert.True(t, ObjectMetaObjEquivalent(o3, o5)) assert.True(t, ObjectMetaObjEquivalent(o7, o8)) assert.True(t, ObjectMetaObjEquivalent(o8, o7)) } From eff8b641fcf321b58afb7841d6dc7bce0ddf50a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gary=20Liu=20=28=E5=88=98=E5=B9=BF=E6=BA=90=29?= Date: Mon, 31 Jul 2023 09:59:07 +0000 Subject: [PATCH 168/173] feat: forbid overriding invalid paths --- .../sync}/overrides.go | 26 ++++++++----------- pkg/controllers/sync/resource.go | 21 ++++++++++----- 2 files changed, 26 insertions(+), 21 deletions(-) rename pkg/{util/overrides => controllers/sync}/overrides.go (74%) diff --git a/pkg/util/overrides/overrides.go b/pkg/controllers/sync/overrides.go similarity index 74% rename from pkg/util/overrides/overrides.go rename to pkg/controllers/sync/overrides.go index 1b992793..f1d4dc6b 100644 --- a/pkg/util/overrides/overrides.go +++ b/pkg/controllers/sync/overrides.go @@ -18,7 +18,7 @@ This file may have been modified by The KubeAdmiral Authors are Copyright 2023 The KubeAdmiral Authors. */ -package overrides +package sync import ( "encoding/json" @@ -30,29 +30,25 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) -// Namespace and name may not be overridden since these fields are the -// primary mechanism of association between a federated resource in -// the host cluster and the target resources in the member clusters. +// Namespace and name may not be overridden since these fields are the primary +// mechanism of association between a federated resource in the host cluster and +// the target resources in the member clusters. // -// Kind should always be sourced from the FTC and not vary across +// Kind should always be sourced from the source object and should not vary across // member clusters. // -// apiVersion can be overridden to support managing resources like -// Ingress which can exist in different groups at different -// versions. Users will need to take care not to abuse this -// capability. +// apiVersion can be overridden to support managing resources like Ingress which +// can exist in different groups at different versions. Users will need to take +// care not to abuse this capability.
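//
// For example, an override patch like the following (illustrative YAML) is
// now rejected by ApplyOverrides in resource.go with "invalid override
// path(s)" before any patch is applied:
//
//	- op: replace
//	  path: /metadata/name
//	  value: hijacked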
+var invalidOverridePaths = sets.New( "/metadata/namespace", "/metadata/name", "/metadata/generateName", "/kind", ) -// Mapping of clusterName to overrides for the cluster -type OverridesMap map[string]fedcorev1a1.OverridePatches - -// ApplyJsonPatch applies the override on to the given unstructured object. -func ApplyJsonPatch(obj *unstructured.Unstructured, overrides fedcorev1a1.OverridePatches) error { +// ApplyJSONPatch applies the override on to the given unstructured object. +func ApplyJSONPatch(obj *unstructured.Unstructured, overrides fedcorev1a1.OverridePatches) error { // TODO: Do the defaulting of "op" field to "replace" in API defaulting for i, overrideItem := range overrides { if overrideItem.Op == "" { diff --git a/pkg/controllers/sync/resource.go b/pkg/controllers/sync/resource.go index 1ebdae69..9d14ead3 100644 --- a/pkg/controllers/sync/resource.go +++ b/pkg/controllers/sync/resource.go @@ -42,7 +42,6 @@ import ( annotationutil "github.com/kubewharf/kubeadmiral/pkg/util/annotation" "github.com/kubewharf/kubeadmiral/pkg/util/finalizers" "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" - overridesutil "github.com/kubewharf/kubeadmiral/pkg/util/overrides" ) // FederatedResource is a wrapper for FederatedObjects and @@ -75,9 +74,10 @@ type federatedResource struct { federatedObject fedcorev1a1.GenericFederatedObject template *unstructured.Unstructured versionManager *version.VersionManager - overridesMap overridesutil.OverridesMap - versionMap map[string]string - eventRecorder record.EventRecorder + // Overrides for each cluster. + overridesMap map[string]fedcorev1a1.OverridePatches + versionMap map[string]string + eventRecorder record.EventRecorder } func (r *federatedResource) FederatedName() common.QualifiedName { @@ -247,8 +247,17 @@ func (r *federatedResource) ApplyOverrides( if err != nil { return err } + invalidPathsFound := sets.New[string]() + for _, override := range overrides { + if invalidOverridePaths.Has(override.Path) { + invalidPathsFound.Insert(override.Path) + } + } + if invalidPathsFound.Len() > 0 { + return fmt.Errorf("invalid override path(s): %v", invalidPathsFound.UnsortedList()) + } if overrides != nil { - if err := overridesutil.ApplyJsonPatch(obj, overrides); err != nil { + if err := ApplyJSONPatch(obj, overrides); err != nil { return err } } @@ -305,7 +314,7 @@ func (r *federatedResource) overridesForCluster(clusterName string) (fedcorev1a1 return lhs < rhs }) - r.overridesMap = make(overridesutil.OverridesMap) + r.overridesMap = make(map[string]fedcorev1a1.OverridePatches) // Merge overrides in the specified order for _, controllerOverride := range overrides { From 3473b46e0e58baf1bc52e3ef1206b5fe243911ba Mon Sep 17 00:00:00 2001 From: "lihanbo.0316" Date: Wed, 12 Jul 2023 16:34:39 +0800 Subject: [PATCH 169/173] refactor: follower controller --- .gitignore | 1 + .../app/controllermanager.go | 1 + cmd/controller-manager/app/core.go | 24 + config/sample/extra/pod-ftc.yaml | 1 - config/sample/host/01-ftc.yaml | 5 - pkg/client/generic/genericclient.go | 117 ----- .../follower/bidirectional_cache.go | 5 +- pkg/controllers/follower/controller.go | 477 ++++++++---------- pkg/controllers/follower/util.go | 105 ++-- pkg/controllers/follower/util_test.go | 16 +- 10 files changed, 290 insertions(+), 462 deletions(-) diff --git a/.gitignore b/.gitignore index 3d3028a7..00dae16d 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ output vendor .vscode *debug* +.DS_Store # log files *.log diff --git a/cmd/controller-manager/app/controllermanager.go 
b/cmd/controller-manager/app/controllermanager.go index b8211e92..cc8f3c13 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -55,6 +55,7 @@ var knownControllers = map[string]controllermanager.StartControllerFunc{ FederatedClusterControllerName: startFederatedClusterController, SchedulerName: startScheduler, SyncControllerName: startSyncController, + FollowerControllerName: startFollowerController, } var controllersDisabledByDefault = sets.New[string]() diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index a105add5..04466923 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -26,6 +26,7 @@ import ( controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" + "github.com/kubewharf/kubeadmiral/pkg/controllers/follower" "github.com/kubewharf/kubeadmiral/pkg/controllers/nsautoprop" "github.com/kubewharf/kubeadmiral/pkg/controllers/override" "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" @@ -237,3 +238,26 @@ func startSyncController( return syncController, nil } + +func startFollowerController( + ctx context.Context, + controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { + followerController, err := follower.NewFollowerController( + controllerCtx.KubeClientset, + controllerCtx.FedClientset, + controllerCtx.InformerManager, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedObjects(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + controllerCtx.Metrics, + klog.Background(), + controllerCtx.WorkerCount, + ) + if err != nil { + return nil, fmt.Errorf("error creating follower controller: %w", err) + } + + go followerController.Run(ctx) + + return followerController, nil +} diff --git a/config/sample/extra/pod-ftc.yaml b/config/sample/extra/pod-ftc.yaml index 464df484..38ca9acd 100644 --- a/config/sample/extra/pod-ftc.yaml +++ b/config/sample/extra/pod-ftc.yaml @@ -31,7 +31,6 @@ spec: controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - - kubeadmiral.io/follower-controller statusCollection: fields: - metadata.creationTimestamp diff --git a/config/sample/host/01-ftc.yaml b/config/sample/host/01-ftc.yaml index 5c6c05db..ee160329 100644 --- a/config/sample/host/01-ftc.yaml +++ b/config/sample/host/01-ftc.yaml @@ -51,7 +51,6 @@ spec: controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - - kubeadmiral.io/follower-controller pathDefinition: labelSelector: spec.selector replicasSpec: spec.replicas @@ -264,7 +263,6 @@ spec: controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - - kubeadmiral.io/follower-controller statusCollection: enabled: true fields: @@ -288,7 +286,6 @@ spec: controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - - kubeadmiral.io/follower-controller statusCollection: enabled: true fields: @@ -309,7 +306,6 @@ spec: controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - - kubeadmiral.io/follower-controller statusCollection: enabled: true fields: @@ -330,7 +326,6 @@ spec: controllers: - - kubeadmiral.io/global-scheduler - - kubeadmiral.io/overridepolicy-controller - - - 
kubeadmiral.io/follower-controller statusCollection: enabled: true fields: diff --git a/pkg/client/generic/genericclient.go b/pkg/client/generic/genericclient.go index f22ba80f..e372f51c 100644 --- a/pkg/client/generic/genericclient.go +++ b/pkg/client/generic/genericclient.go @@ -23,21 +23,15 @@ package generic import ( "context" "fmt" - "strings" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/kubewharf/kubeadmiral/pkg/client/generic/scheme" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/history" ) type Client interface { @@ -48,7 +42,6 @@ type Client interface { List(ctx context.Context, obj client.ObjectList, namespace string) error UpdateStatus(ctx context.Context, obj client.Object) error Patch(ctx context.Context, obj client.Object, patch client.Patch) error - Rollback(ctx context.Context, obj client.Object, toRevision int64) error DeleteHistory(ctx context.Context, obj client.Object) error ListWithOptions(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error @@ -121,91 +114,6 @@ func (c *genericClient) Patch(ctx context.Context, obj client.Object, patch clie return c.client.Patch(ctx, obj, patch) } -// Rollback rollbacks federated Object such as FederatedDeployment -func (c *genericClient) Rollback(ctx context.Context, obj client.Object, toRevision int64) error { - if toRevision < 0 { - return fmt.Errorf("unable to find specified revision %v in history", toRevision) - } - if toRevision == 0 { - // try to get last revision from annotations, fallback to list all revisions on error - if err := c.rollbackToLastRevision(ctx, obj); err == nil { - return nil - } - } - - history, err := c.controlledHistory(ctx, obj) - if err != nil { - return fmt.Errorf("failed to list history: %s", err) - } - if toRevision == 0 && len(history) <= 1 { - return fmt.Errorf("no last revision to roll back to") - } - - toHistory := findHistory(toRevision, history) - if toHistory == nil { - return fmt.Errorf("unable to find specified revision %v in history", toHistory) - } - - // Restore revision - if err := c.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, toHistory.Data.Raw)); err != nil { - return fmt.Errorf("failed restoring revision %d: %v", toRevision, err) - } - return nil -} - -func (c *genericClient) rollbackToLastRevision(ctx context.Context, obj client.Object) error { - accessor, err := meta.Accessor(obj) - if err != nil { - return err - } - lastRevisionNameWithHash := accessor.GetAnnotations()[common.LastRevisionAnnotation] - if len(lastRevisionNameWithHash) == 0 { - return fmt.Errorf("annotation: %s not found", common.LastRevisionAnnotation) - } - - lastRevisionName, err := c.checkLastRevisionNameWithHash(lastRevisionNameWithHash, obj) - if err != nil { - return fmt.Errorf("failed to check last revision name, err: %v", err) - } - - latestRevision := &appsv1.ControllerRevision{} - if err := c.Get(ctx, latestRevision, accessor.GetNamespace(), lastRevisionName); err != nil { - return err - } - - // restore latest revision - if err := c.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, latestRevision.Data.Raw)); err != nil { - return fmt.Errorf("failed restoring latest revision: %v", err) - } - return nil -} - 
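// Aside: the deleted rollback path above keys off common.LastRevisionAnnotation, whose
// value the helper removed just below parses as "<revision-name>|<pod-template-hash>".
// A minimal illustrative sketch of that contract (splitRevisionAnnotation is a
// hypothetical name, not part of the removed code):
//
//	func splitRevisionAnnotation(value string) (name, hash string, err error) {
//		parts := strings.Split(value, "|")
//		if len(parts) != 2 {
//			return "", "", fmt.Errorf("invalid lastRevisionNameWithHash: %q", value)
//		}
//		return parts[0], parts[1], nil
//	}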
-func (c *genericClient) checkLastRevisionNameWithHash(lastRevisionNameWithHash string, obj client.Object) (string, error) { - parts := strings.Split(lastRevisionNameWithHash, "|") - if len(parts) != 2 { - return "", fmt.Errorf("invalid lastRevisionNameWithHash: %s", lastRevisionNameWithHash) - } - lastRevisionName, hash := parts[0], parts[1] - - utdObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return "", err - } - - template, ok, err := unstructured.NestedMap(utdObj, "spec", "template", "spec", "template") - if err != nil { - return "", err - } - if !ok { - return "", fmt.Errorf("spec.template.spec.template is not found, fedResource: %+v", obj) - } - - if templateHash := history.HashObject(template); templateHash != hash { - return "", fmt.Errorf("pod template hash: %s, last revision name suffix: %s, they should be equal", templateHash, hash) - } - return lastRevisionName, nil -} - // controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor func (c *genericClient) controlledHistory(ctx context.Context, obj client.Object) ([]*appsv1.ControllerRevision, error) { accessor, err := meta.Accessor(obj) @@ -246,28 +154,3 @@ func (c *genericClient) DeleteHistory(ctx context.Context, obj client.Object) er } return nil } - -// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions. -// It returns nil if no such controllerrevision exists. -// If toRevision is 0, the last previously used history is returned. -func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision { - if toRevision == 0 && len(allHistory) <= 1 { - return nil - } - - // Find the history to rollback to - var toHistory *appsv1.ControllerRevision - if toRevision == 0 { - // If toRevision == 0, find the latest revision (2nd max) - history.SortControllerRevisions(allHistory) - toHistory = allHistory[len(allHistory)-2] - } else { - for _, h := range allHistory { - if h.Revision == toRevision { - // If toRevision != 0, find the history with matching revision - return h - } - } - } - return toHistory -} diff --git a/pkg/controllers/follower/bidirectional_cache.go b/pkg/controllers/follower/bidirectional_cache.go index 1ea4af86..8935eb5d 100644 --- a/pkg/controllers/follower/bidirectional_cache.go +++ b/pkg/controllers/follower/bidirectional_cache.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -40,13 +39,13 @@ type bidirectionalCache[V1, V2 comparable] struct { func (c *bidirectionalCache[V1, V2]) lookup(key V1) sets.Set[V2] { c.RLock() defer c.RUnlock() - return c.cache[key] + return c.cache[key].Clone() } func (c *bidirectionalCache[V1, V2]) reverseLookup(key V2) sets.Set[V1] { c.RLock() defer c.RUnlock() - return c.reverseCache[key] + return c.reverseCache[key].Clone() } func (c *bidirectionalCache[V1, V2]) update(key V1, newValues sets.Set[V2]) { diff --git a/pkg/controllers/follower/controller.go b/pkg/controllers/follower/controller.go index 009a4c5c..cc79a486 100644 --- a/pkg/controllers/follower/controller.go +++ b/pkg/controllers/follower/controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
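// Aside: note the bidirectional_cache.go change earlier in this patch, where lookup and
// reverseLookup now return c.cache[key].Clone() rather than the cached set itself.
// Returning a clone lets callers iterate, union, or insert into the result without
// racing against update(), which rewrites the cached sets under the write lock. A
// minimal sketch of the caller-side effect (illustrative only; extraFollower is a
// hypothetical value):
//
//	followers := cache.lookup(leader) // private copy, safe to use after RUnlock
//	followers.Insert(extraFollower)   // does not mutate the shared cache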
@@ -20,6 +19,7 @@ package follower import ( "context" "fmt" + "sync" "time" appsv1 "k8s.io/api/apps/v1" @@ -29,29 +29,26 @@ import ( "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/pendingcontrollers" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( @@ -92,40 +89,22 @@ var ( // TODO: support handles-object annotations in this controller? // TODO: support parsing followers introduced by overrides -// Handles for a leader or follower type. -type typeHandles struct { - // +federatedKind - name string - typeConfig *fedcorev1a1.FederatedTypeConfig - sourceGK schema.GroupKind - federatedGK schema.GroupKind - informer informers.GenericInformer - client dynamic.NamespaceableResourceInterface - worker worker.ReconcileWorker -} - type Controller struct { - name string + gkToFTCLock sync.RWMutex + gkToFTCName map[schema.GroupKind]string + + cacheObservedFromLeaders *bidirectionalCache[fedcorev1a1.LeaderReference, FollowerReference] + cacheObservedFromFollowers *bidirectionalCache[FollowerReference, fedcorev1a1.LeaderReference] + worker worker.ReconcileWorker[objectGroupKindKey] + informerManager informermanager.InformerManager + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer + + fedClient fedclient.Interface eventRecorder record.EventRecorder metrics stats.Metrics logger klog.Logger - - // The following maps are written during initialization and only read afterward, - // therefore no locks are required. 
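// Aside: the deleted comment above no longer holds in this refactor. Type mappings now
// arrive dynamically through InformerManager.AddFTCUpdateHandler, so the new gkToFTCName
// map is guarded by gkToFTCLock. A minimal sketch of the read path, mirroring how
// enqueueFollowers and getLeaderObj use the lock later in this file (ftcNameFor is a
// hypothetical helper, not part of the patch):
//
//	func (c *Controller) ftcNameFor(gk schema.GroupKind) (string, bool) {
//		c.gkToFTCLock.RLock()
//		defer c.gkToFTCLock.RUnlock()
//		name, ok := c.gkToFTCName[gk]
//		return name, ok
//	}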
- - // map from source GroupKind to federated GroupKind - sourceToFederatedGKMap map[schema.GroupKind]schema.GroupKind - // map from leader federated GroupKind to typeHandle - leaderTypeHandles map[schema.GroupKind]*typeHandles - // map from follower federated GroupKind to typeHandle - followerTypeHandles map[schema.GroupKind]*typeHandles - - cacheObservedFromLeaders *bidirectionalCache[fedtypesv1a1.LeaderReference, FollowerReference] - cacheObservedFromFollowers *bidirectionalCache[FollowerReference, fedtypesv1a1.LeaderReference] - - kubeClient kubernetes.Interface - fedClient fedclient.Interface } func (c *Controller) IsControllerReady() bool { @@ -134,121 +113,118 @@ func (c *Controller) IsControllerReady() bool { func NewFollowerController( kubeClient kubernetes.Interface, - dynamicClient dynamic.Interface, fedClient fedclient.Interface, - informerFactory dynamicinformer.DynamicSharedInformerFactory, + informerManager informermanager.InformerManager, + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, metrics stats.Metrics, + logger klog.Logger, workerCount int, ) (*Controller, error) { c := &Controller{ - name: ControllerName, + gkToFTCName: make(map[schema.GroupKind]string), + informerManager: informerManager, + fedObjectInformer: fedObjectInformer, + clusterFedObjectInformer: clusterFedObjectInformer, + cacheObservedFromLeaders: newBidirectionalCache[fedcorev1a1.LeaderReference, FollowerReference](), + cacheObservedFromFollowers: newBidirectionalCache[FollowerReference, fedcorev1a1.LeaderReference](), + fedClient: fedClient, eventRecorder: eventsink.NewDefederatingRecorderMux(kubeClient, ControllerName, 4), metrics: metrics, - logger: klog.LoggerWithValues(klog.Background(), "controller", ControllerName), - sourceToFederatedGKMap: make(map[schema.GroupKind]schema.GroupKind), - leaderTypeHandles: make(map[schema.GroupKind]*typeHandles), - followerTypeHandles: make(map[schema.GroupKind]*typeHandles), - cacheObservedFromLeaders: newBidirectionalCache[fedtypesv1a1.LeaderReference, FollowerReference](), - cacheObservedFromFollowers: newBidirectionalCache[FollowerReference, fedtypesv1a1.LeaderReference](), - kubeClient: kubeClient, - fedClient: fedClient, + logger: logger.WithValues("controller", ControllerName), } - getHandles := func( - ftc *fedcorev1a1.FederatedTypeConfig, - handleNamePrefix string, - reconcile func(*typeHandles, common.QualifiedName) worker.Result, - ) *typeHandles { - targetType := ftc.GetTargetType() - federatedType := ftc.GetFederatedType() - federatedGVR := schemautil.APIResourceToGVR(&federatedType) - - handles := &typeHandles{ - name: handleNamePrefix + "-" + federatedType.Kind, - typeConfig: ftc, - sourceGK: schemautil.APIResourceToGVK(&targetType).GroupKind(), - federatedGK: schemautil.APIResourceToGVK(&federatedType).GroupKind(), - informer: informerFactory.ForResource(federatedGVR), - client: dynamicClient.Resource(federatedGVR), - } - handles.worker = worker.NewReconcileWorker( - func(qualifiedName common.QualifiedName) worker.Result { - return reconcile(handles, qualifiedName) - }, - worker.RateLimiterOptions{}, - workerCount, - c.metrics, - delayingdeliver.NewMetricTags("follower-controller-worker", handles.name), - ) - handles.informer.Informer().AddEventHandlerWithResyncPeriod( - util.NewTriggerOnAllChanges(handles.worker.EnqueueObject), - util.NoResyncPeriod, - ) - - return handles + if _, err := c.fedObjectInformer.Informer().AddEventHandlerWithResyncPeriod( 
+ eventhandlers.NewTriggerOnAllChanges(c.enqueueSupportedType), + util.NoResyncPeriod, + ); err != nil { + return nil, err } - ftcs, err := c.fedClient.CoreV1alpha1(). - FederatedTypeConfigs(). - List(context.TODO(), metav1.ListOptions{}) - if err != nil { + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandlerWithResyncPeriod( + eventhandlers.NewTriggerOnAllChanges(c.enqueueSupportedType), + util.NoResyncPeriod, + ); err != nil { return nil, err } - // Find the supported leader and follower types and create their handles - for i := range ftcs.Items { - ftc := &ftcs.Items[i] - targetType := ftc.Spec.TargetType - federatedType := ftc.Spec.FederatedType + c.worker = worker.NewReconcileWorker[objectGroupKindKey]( + "follower-controller-worker", + c.reconcile, + worker.RateLimiterOptions{}, + workerCount, + c.metrics, + ) + + if err := c.informerManager.AddFTCUpdateHandler(func(_, latest *fedcorev1a1.FederatedTypeConfig) { + targetType := latest.Spec.SourceType targetGK := schema.GroupKind{Group: targetType.Group, Kind: targetType.Kind} - federatedGK := schema.GroupKind{Group: federatedType.Group, Kind: federatedType.Kind} - - if _, exists := leaderPodTemplatePaths[targetGK]; exists { - handles := getHandles(ftc, "leader", c.reconcileLeader) - c.sourceToFederatedGKMap[targetGK] = federatedGK - c.leaderTypeHandles[federatedGK] = handles - c.logger.V(2).Info(fmt.Sprintf("Found supported leader FederatedTypeConfig %s", ftc.Name)) - } else if supportedFollowerTypes.Has(targetGK) { - handles := getHandles(ftc, "follower", c.reconcileFollower) - c.sourceToFederatedGKMap[targetGK] = federatedGK - c.followerTypeHandles[federatedGK] = handles - c.logger.V(2).Info(fmt.Sprintf("Found supported follower FederatedTypeConfig %s", ftc.Name)) - } + + c.gkToFTCLock.Lock() + defer c.gkToFTCLock.Unlock() + + c.gkToFTCName[targetGK] = latest.Name + }); err != nil { + return nil, err } return c, nil } -func (c *Controller) Run(stopChan <-chan struct{}) { - c.logger.Info("Starting controller") - defer c.logger.Info("Stopping controller") +func (c *Controller) Run(ctx context.Context) { + ctx, logger := logging.InjectLogger(ctx, c.logger) + + logger.Info("Starting controller") + defer logger.Info("Stopping controller") - if !cache.WaitForNamedCacheSync(c.name, stopChan, c.HasSynced) { + if !cache.WaitForNamedCacheSync(ControllerName, ctx.Done(), c.HasSynced) { + logger.Error(nil, "Timed out waiting for caches to sync") return } + logger.Info("Caches are synced") + c.worker.Run(ctx) + <-ctx.Done() +} + +func (c *Controller) HasSynced() bool { + return c.fedObjectInformer.Informer().HasSynced() && + c.clusterFedObjectInformer.Informer().HasSynced() +} - for _, handle := range c.leaderTypeHandles { - handle.worker.Run(stopChan) +func (c *Controller) enqueueSupportedType(object interface{}) { + fedObject, ok := object.(fedcorev1a1.GenericFederatedObject) + if !ok { + return } - for _, handle := range c.followerTypeHandles { - handle.worker.Run(stopChan) + + template, err := fedObject.GetSpec().GetTemplateAsUnstructured() + if err != nil { + return } - <-stopChan + templateGK := template.GroupVersionKind().GroupKind() + _, isLeader := leaderPodTemplatePaths[templateGK] + isFollower := supportedFollowerTypes.Has(templateGK) + if isLeader || isFollower { + c.worker.Enqueue(objectGroupKindKey{ + sourceGK: templateGK, + namespace: fedObject.GetNamespace(), + fedName: fedObject.GetName(), + sourceName: template.GetName(), + }) + } } -func (c *Controller) HasSynced() bool { - for _, handle := range 
c.leaderTypeHandles { - if !handle.informer.Informer().HasSynced() { - return false - } +func (c *Controller) reconcile(ctx context.Context, key objectGroupKindKey) (status worker.Result) { + if _, exists := leaderPodTemplatePaths[key.sourceGK]; exists { + return c.reconcileLeader(ctx, key) } - for _, handle := range c.followerTypeHandles { - if !handle.informer.Informer().HasSynced() { - return false - } + + if _, exists := supportedFollowerTypes[key.sourceGK]; exists { + return c.reconcileFollower(ctx, key) } - return true + + return worker.StatusAllOK } /* @@ -256,47 +232,42 @@ Reconciles the leader to make sure its desired followers (derivable from the lea and its stale followers (derivable from cache) no longer reference it. */ func (c *Controller) reconcileLeader( - handles *typeHandles, - qualifiedName common.QualifiedName, + ctx context.Context, + key objectGroupKindKey, ) (status worker.Result) { - c.metrics.Rate(fmt.Sprintf("follower-controller-%s.throughput", handles.name), 1) - key := qualifiedName.String() - logger := c.logger.WithValues("origin", "reconcileLeader", "type", handles.name, "key", key) + c.metrics.Rate(fmt.Sprintf("follower-controller-%s.throughput", key.sourceGK.String()), 1) + ctx, keyedLogger := logging.InjectLoggerValues(ctx, "origin", "reconcileLeader", "type", key.sourceGK.String(), "key", key.ObjectSourceKey()) startTime := time.Now() - logger.V(3).Info("Starting reconcileLeader") + keyedLogger.V(3).Info("Starting reconcileLeader") defer func() { - c.metrics.Duration(fmt.Sprintf("follower-controller-%s.latency", handles.name), startTime) - logger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcileLeader") + c.metrics.Duration(fmt.Sprintf("follower-controller-%s.latency", key.sourceGK.String()), startTime) + keyedLogger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcileLeader") }() - leader := fedtypesv1a1.LeaderReference{ - Group: handles.federatedGK.Group, - Kind: handles.federatedGK.Kind, - Namespace: qualifiedName.Namespace, - Name: qualifiedName.Name, + leader := fedcorev1a1.LeaderReference{ + Group: key.sourceGK.Group, + Kind: key.sourceGK.Kind, + Namespace: key.namespace, + Name: key.sourceName, } - fedObj, err := getObjectFromStore(handles.informer.Informer().GetStore(), key) - if err != nil { - logger.Error(err, "Failed to retrieve object from store") + fedObj, err := fedobjectadapters.GetFromLister(c.fedObjectInformer.Lister(), c.clusterFedObjectInformer.Lister(), key.namespace, key.fedName) + if err != nil && !apierrors.IsNotFound(err) { + keyedLogger.Error(err, "Failed to get leader object from store") return worker.StatusError } + if apierrors.IsNotFound(err) { + fedObj = nil + } + var desiredFollowers sets.Set[FollowerReference] if fedObj != nil { - // We only need to check for dependencies if the object exists - if ok, err := pendingcontrollers.ControllerDependenciesFulfilled(fedObj, PrefixedControllerName); err != nil { - logger.Error(err, "Failed to check controller dependencies") - return worker.StatusError - } else if !ok { - return worker.StatusAllOK - } - // Only leaders that have not been deleted should have followers - desiredFollowers, err = c.inferFollowers(handles, fedObj) + desiredFollowers, err = c.inferFollowers(key.sourceGK, fedObj) if err != nil { - logger.Error(err, "Failed to infer followers") + keyedLogger.Error(err, "Failed to infer followers") if fedObj != nil { c.eventRecorder.Eventf( fedObj, @@ -309,67 +280,55 @@ func 
(c *Controller) reconcileLeader( return worker.StatusError } } + c.cacheObservedFromLeaders.update(leader, desiredFollowers) currentFollowers := c.cacheObservedFromFollowers.reverseLookup(leader) // enqueue all followers whose desired state may have changed - for follower := range desiredFollowers.Union(currentFollowers) { - handles, exists := c.followerTypeHandles[follower.GroupKind] + c.enqueueFollowers(desiredFollowers.Union(currentFollowers)) + + return worker.StatusAllOK +} + +func (c *Controller) enqueueFollowers(followers sets.Set[FollowerReference]) { + c.gkToFTCLock.RLock() + defer c.gkToFTCLock.RUnlock() + + // enqueue all followers whose desired state may have changed + for follower := range followers { + ftcName, exists := c.gkToFTCName[follower.GroupKind] if !exists { - logger.WithValues("follower", follower).Error(nil, "Unsupported follower type") - return worker.StatusError + continue } - handles.worker.Enqueue( - common.QualifiedName{Namespace: follower.Namespace, Name: follower.Name}, - ) - } - if fedObj != nil { - updated, err := pendingcontrollers.UpdatePendingControllers( - fedObj, - PrefixedControllerName, - false, - handles.typeConfig.GetControllers(), + c.worker.Enqueue( + objectGroupKindKey{ + sourceGK: follower.GroupKind, + namespace: follower.Namespace, + sourceName: follower.Name, + fedName: naming.GenerateFederatedObjectName(follower.Name, ftcName), + }, ) - if err != nil { - logger.Error(err, "Failed to set pending controllers") - return worker.StatusError - } - if updated { - logger.V(1).Info("Updating leader to sync with pending controllers") - _, err = handles.client.Namespace(fedObj.GetNamespace()). - Update(context.Background(), fedObj, metav1.UpdateOptions{}) - if err != nil { - if apierrors.IsConflict(err) { - return worker.StatusConflict - } - logger.Error(err, "Failed to update after modifying pending controllers") - return worker.StatusError - } - } } - - return worker.StatusAllOK } func (c *Controller) inferFollowers( - handles *typeHandles, - fedObj *unstructured.Unstructured, + sourceGK schema.GroupKind, + fedObj fedcorev1a1.GenericFederatedObject, ) (sets.Set[FollowerReference], error) { if fedObj.GetAnnotations()[common.EnableFollowerSchedulingAnnotation] != common.AnnotationValueTrue { // follower scheduling is not enabled return nil, nil } - followersFromAnnotation, err := getFollowersFromAnnotation(fedObj, c.sourceToFederatedGKMap) + followersFromAnnotation, err := getFollowersFromAnnotation(fedObj) if err != nil { return nil, err } followersFromPodTemplate, err := getFollowersFromPodTemplate( fedObj, - leaderPodTemplatePaths[handles.sourceGK], - c.sourceToFederatedGKMap, + leaderPodTemplatePaths[sourceGK], ) if err != nil { return nil, err @@ -380,18 +339,14 @@ func (c *Controller) inferFollowers( func (c *Controller) updateFollower( ctx context.Context, - handles *typeHandles, - followerUns *unstructured.Unstructured, - followerObj *fedtypesv1a1.GenericFederatedFollower, + followerObj fedcorev1a1.GenericFederatedObject, leadersChanged bool, - leaders []fedtypesv1a1.LeaderReference, + leaders []fedcorev1a1.LeaderReference, ) (updated bool, err error) { logger := klog.FromContext(ctx) if leadersChanged { - if err := fedtypesv1a1.SetFollows(followerUns, leaders); err != nil { - return false, fmt.Errorf("set leaders on follower: %w", err) - } + followerObj.GetSpec().Follows = leaders } clusters, err := c.leaderPlacementUnion(leaders) @@ -399,19 +354,11 @@ func (c *Controller) updateFollower( return false, fmt.Errorf("get leader placement union: 
%w", err) } - placementsChanged := followerObj.Spec.SetPlacementNames(PrefixedControllerName, clusters) - if placementsChanged { - err = util.SetGenericPlacements(followerUns, followerObj.Spec.Placements) - if err != nil { - return false, fmt.Errorf("set placements: %w", err) - } - } - + placementsChanged := followerObj.GetSpec().SetControllerPlacement(PrefixedControllerName, clusters.UnsortedList()) needsUpdate := leadersChanged || placementsChanged if needsUpdate { logger.V(1).Info("Updating follower to sync with leaders") - _, err = handles.client.Namespace(followerUns.GetNamespace()). - Update(context.TODO(), followerUns, metav1.UpdateOptions{}) + _, err = fedobjectadapters.Update(ctx, c.fedClient.CoreV1alpha1(), followerObj, metav1.UpdateOptions{}) if err != nil { return false, fmt.Errorf("update follower: %w", err) } @@ -425,50 +372,44 @@ func (c *Controller) updateFollower( Reconciles the follower so it references the desired leaders and has the correct placements. */ func (c *Controller) reconcileFollower( - handles *typeHandles, - qualifiedName common.QualifiedName, + ctx context.Context, + key objectGroupKindKey, ) (status worker.Result) { - c.metrics.Rate(fmt.Sprintf("follower-controller-%s.throughput", handles.name), 1) - key := qualifiedName.String() - logger := c.logger.WithValues("origin", "reconcileFollower", "type", handles.name, "key", key) - ctx := klog.NewContext(context.TODO(), logger) + c.metrics.Rate(fmt.Sprintf("follower-controller-%s.throughput", key.sourceGK.String()), 1) + ctx, keyedLogger := logging.InjectLoggerValues(ctx, "origin", "reconcileFollower", "type", key.sourceGK.String(), "key", key.ObjectSourceKey()) + startTime := time.Now() - logger.V(3).Info("Starting reconcileFollower") + keyedLogger.V(3).Info("Starting reconcileFollower") defer func() { - c.metrics.Duration(fmt.Sprintf("follower-controller-%s.latency", handles.name), startTime) - logger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcileFollower") + c.metrics.Duration(fmt.Sprintf("follower-controller-%s.latency", key.sourceGK.String()), startTime) + keyedLogger.WithValues("duration", time.Since(startTime), "status", status.String()).V(3).Info("Finished reconcileFollower") }() follower := FollowerReference{ - GroupKind: handles.federatedGK, - Namespace: qualifiedName.Namespace, - Name: qualifiedName.Name, + GroupKind: key.sourceGK, + Namespace: key.namespace, + Name: key.sourceName, } - followerUns, err := getObjectFromStore( - handles.informer.Informer().GetStore(), - qualifiedName.String(), + followerObject, err := fedobjectadapters.GetFromLister( + c.fedObjectInformer.Lister(), + c.clusterFedObjectInformer.Lister(), + key.namespace, + key.fedName, ) - if err != nil { - logger.Error(err, "Failed to get follower object from store") + if err != nil && !apierrors.IsNotFound(err) { + keyedLogger.Error(err, "Failed to get follower object from store") return worker.StatusError } - if followerUns == nil { + if apierrors.IsNotFound(err) { // The deleted follower no longer references any leaders c.cacheObservedFromFollowers.update(follower, nil) return worker.StatusAllOK } - followerObj := &fedtypesv1a1.GenericFederatedFollower{} - err = runtime.DefaultUnstructuredConverter.FromUnstructured(followerUns.Object, followerObj) - if err != nil { - logger.Error(err, "Failed to unmarshall follower object from unstructured") - return worker.StatusAllOK // retrying won't help - } - - currentLeaders := sets.New(followerObj.Spec.Follows...) 
+ currentLeaders := sets.New(followerObject.GetSpec().Follows...) c.cacheObservedFromFollowers.update(follower, currentLeaders) desiredLeaders := c.cacheObservedFromLeaders.reverseLookup(follower) @@ -476,9 +417,7 @@ func (c *Controller) reconcileFollower( leadersChanged := !equality.Semantic.DeepEqual(desiredLeaders, currentLeaders) updated, err := c.updateFollower( ctx, - handles, - followerUns, - followerObj, + followerObject, leadersChanged, desiredLeaders.UnsortedList(), ) @@ -486,9 +425,9 @@ func (c *Controller) reconcileFollower( if apierrors.IsConflict(err) { return worker.StatusConflict } - logger.Error(err, "Failed to update follower") + keyedLogger.Error(err, "Failed to update follower") c.eventRecorder.Eventf( - followerUns, + followerObject, corev1.EventTypeWarning, EventReasonFailedUpdateFollower, "Failed to update follower to sync with leader placements: %v", @@ -496,67 +435,59 @@ func (c *Controller) reconcileFollower( ) return worker.StatusError } else if updated { - logger.V(1).Info("Updated follower to sync with leaders") + keyedLogger.V(1).Info("Updated follower to sync with leaders") } return worker.StatusAllOK } func (c *Controller) getLeaderObj( - leader fedtypesv1a1.LeaderReference, -) (*typeHandles, *fedtypesv1a1.GenericObjectWithPlacements, error) { + leader fedcorev1a1.LeaderReference, +) (fedcorev1a1.GenericFederatedObject, error) { leaderGK := leader.GroupKind() - handles, exists := c.leaderTypeHandles[leaderGK] + _, exists := leaderPodTemplatePaths[leaderGK] if !exists { - return nil, nil, fmt.Errorf("unsupported leader type %v", leaderGK) - } - leaderQualifiedName := common.QualifiedName{Namespace: leader.Namespace, Name: leader.Name} - leaderUns, err := getObjectFromStore( - handles.informer.Informer().GetStore(), - leaderQualifiedName.String(), - ) - if err != nil { - return nil, nil, fmt.Errorf("get from store: %w", err) + return nil, fmt.Errorf("unsupported leader type %v", leaderGK) } - if leaderUns == nil { - return nil, nil, nil + + c.gkToFTCLock.RLock() + ftcName, exists := c.gkToFTCName[leaderGK] + if !exists { + return nil, fmt.Errorf("unknown leader gk %v", leaderGK) } - leaderObj, err := util.UnmarshalGenericPlacements(leaderUns) - if err != nil { - return nil, nil, fmt.Errorf("unmarshal to generic object with placements: %w", err) + leaderName := naming.GenerateFederatedObjectName(leader.Name, ftcName) + leaderObj, err := fedobjectadapters.GetFromLister( + c.fedObjectInformer.Lister(), + c.clusterFedObjectInformer.Lister(), + leader.Namespace, + leaderName, + ) + if err != nil && !apierrors.IsNotFound(err) { + return nil, fmt.Errorf("get from store: %w", err) + } + if apierrors.IsNotFound(err) { + return nil, nil } - return handles, leaderObj, nil + return leaderObj, nil } func (c *Controller) leaderPlacementUnion( - leaders []fedtypesv1a1.LeaderReference, -) (map[string]struct{}, error) { - clusters := map[string]struct{}{} + leaders []fedcorev1a1.LeaderReference, +) (sets.Set[string], error) { + clusters := sets.New[string]() for _, leader := range leaders { - _, leaderObjWithPlacement, err := c.getLeaderObj(leader) + leaderObjWithPlacement, err := c.getLeaderObj(leader) if err != nil { return nil, fmt.Errorf("get leader object %v: %w", leader, err) } if leaderObjWithPlacement == nil { continue } - for cluster := range leaderObjWithPlacement.ClusterNameUnion() { - clusters[cluster] = struct{}{} - } + + clusters = leaderObjWithPlacement.GetSpec().GetPlacementUnion().Union(clusters) } return clusters, nil } - -func getObjectFromStore(store 
cache.Store, key string) (*unstructured.Unstructured, error) { - obj, exists, err := store.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, nil - } - return obj.(*unstructured.Unstructured).DeepCopy(), nil -} diff --git a/pkg/controllers/follower/util.go b/pkg/controllers/follower/util.go index 99b32364..c5977419 100644 --- a/pkg/controllers/follower/util.go +++ b/pkg/controllers/follower/util.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -28,10 +27,22 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" podutil "github.com/kubewharf/kubeadmiral/pkg/lifted/kubernetes/pkg/api/v1/pod" ) +type objectGroupKindKey struct { + namespace string + fedName string + sourceName string + sourceGK schema.GroupKind +} + +func (k objectGroupKindKey) ObjectSourceKey() string { + return fmt.Sprintf("%s/%s", k.namespace, k.sourceName) +} + type FollowerReference struct { GroupKind schema.GroupKind Namespace string @@ -45,8 +56,7 @@ type followerAnnotationElement struct { } func getFollowersFromAnnotation( - fedObject *unstructured.Unstructured, - sourceToFederatedGKMap map[schema.GroupKind]schema.GroupKind, + fedObject fedcorev1a1.GenericFederatedObject, ) (sets.Set[FollowerReference], error) { annotation := fedObject.GetAnnotations()[common.FollowersAnnotation] if len(annotation) == 0 { @@ -64,12 +74,9 @@ func getFollowersFromAnnotation( Group: followerFromAnnotation.Group, Kind: followerFromAnnotation.Kind, } - federatedGK, exists := sourceToFederatedGKMap[sourceGK] - if !exists { - return nil, fmt.Errorf("no federated type config found for source type %v", sourceGK) - } + followers.Insert(FollowerReference{ - GroupKind: federatedGK, + GroupKind: sourceGK, // Only allow followers from the same namespace Namespace: fedObject.GetNamespace(), Name: followerFromAnnotation.Name, @@ -79,9 +86,8 @@ func getFollowersFromAnnotation( } func getFollowersFromPodTemplate( - fedObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, podTemplatePath string, - sourceToFederatedGKMap map[schema.GroupKind]schema.GroupKind, ) (sets.Set[FollowerReference], error) { podSpec, err := getPodSpec(fedObject, podTemplatePath) if err != nil { @@ -91,72 +97,67 @@ func getFollowersFromPodTemplate( pod := &corev1.Pod{ Spec: *podSpec, } - return getFollowersFromPod(fedObject.GetNamespace(), pod, sourceToFederatedGKMap), nil + return getFollowersFromPod(fedObject.GetNamespace(), pod), nil } func getFollowersFromPod( namespace string, pod *corev1.Pod, - sourceToFederatedGKMap map[schema.GroupKind]schema.GroupKind, ) sets.Set[FollowerReference] { followers := sets.New[FollowerReference]() - if federatedSecretGK, exists := sourceToFederatedGKMap[schema.GroupKind{Kind: "Secret"}]; exists { - podutil.VisitPodSecretNames(pod, func(name string) bool { - followers.Insert(FollowerReference{ - GroupKind: federatedSecretGK, - Namespace: namespace, - Name: name, - }) - return true + podutil.VisitPodSecretNames(pod, func(name string) bool { + followers.Insert(FollowerReference{ + GroupKind: schema.GroupKind{Kind: "Secret"}, + Namespace: namespace, + Name: name, }) - } + return true + }) - if federatedConfigMapGK, exists := sourceToFederatedGKMap[schema.GroupKind{Kind: "ConfigMap"}]; exists { - podutil.VisitPodConfigmapNames(pod, func(name string) bool { - followers.Insert(FollowerReference{ 
- GroupKind: federatedConfigMapGK, - Namespace: namespace, - Name: name, - }) - return true + podutil.VisitPodConfigmapNames(pod, func(name string) bool { + followers.Insert(FollowerReference{ + GroupKind: schema.GroupKind{Kind: "ConfigMap"}, + Namespace: namespace, + Name: name, }) - } + return true + }) - if federatedPVCGK, exists := sourceToFederatedGKMap[schema.GroupKind{Kind: "PersistentVolumeClaim"}]; exists { - for _, vol := range pod.Spec.Volumes { - // TODO: do we need to support PVCs created from ephemeral volumes? - if vol.PersistentVolumeClaim != nil { - followers.Insert(FollowerReference{ - GroupKind: federatedPVCGK, - Namespace: namespace, - Name: vol.PersistentVolumeClaim.ClaimName, - }) - } - } - } - - if federatedSAGK, exists := sourceToFederatedGKMap[schema.GroupKind{Kind: "ServiceAccount"}]; exists { - if saName := pod.Spec.ServiceAccountName; saName != "" { + for _, vol := range pod.Spec.Volumes { + // TODO: do we need to support PVCs created from ephemeral volumes? + if vol.PersistentVolumeClaim != nil { followers.Insert(FollowerReference{ - GroupKind: federatedSAGK, + GroupKind: schema.GroupKind{Kind: "PersistentVolumeClaim"}, Namespace: namespace, - Name: saName, + Name: vol.PersistentVolumeClaim.ClaimName, }) } } + if saName := pod.Spec.ServiceAccountName; saName != "" { + followers.Insert(FollowerReference{ + GroupKind: schema.GroupKind{Kind: "ServiceAccount"}, + Namespace: namespace, + Name: saName, + }) + } + return followers } -func getPodSpec(fedObject *unstructured.Unstructured, podTemplatePath string) (*corev1.PodSpec, error) { +func getPodSpec(fedObject fedcorev1a1.GenericFederatedObject, podTemplatePath string) (*corev1.PodSpec, error) { if fedObject == nil { return nil, fmt.Errorf("fedObject is nil") } - fedObjectPodTemplatePath := append( - []string{common.SpecField, common.TemplateField}, - strings.Split(podTemplatePath, ".")...) - podTemplateMap, found, err := unstructured.NestedMap(fedObject.Object, fedObjectPodTemplatePath...) + fedObjectPodTemplatePath := strings.Split(podTemplatePath, ".") + templateObj := &unstructured.Unstructured{} + err := templateObj.UnmarshalJSON(fedObject.GetSpec().Template.Raw) + if err != nil { + return nil, err + } + + podTemplateMap, found, err := unstructured.NestedMap(templateObj.Object, fedObjectPodTemplatePath...) 
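// Aside: getPodSpec now reads the pod template out of the federated object's
// spec.template payload (raw JSON) instead of the old spec.template.<podTemplatePath>
// layout on the federated unstructured object. A standalone sketch of the same
// traversal for a Deployment-shaped template, where the pod template path would be
// "spec.template" (illustrative only; rawTemplate stands for fedObj.GetSpec().Template.Raw):
//
//	templateObj := &unstructured.Unstructured{}
//	if err := templateObj.UnmarshalJSON(rawTemplate); err != nil {
//		return nil, err
//	}
//	podMap, found, err := unstructured.NestedMap(templateObj.Object, "spec", "template")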
if err != nil { return nil, err } diff --git a/pkg/controllers/follower/util_test.go b/pkg/controllers/follower/util_test.go index e129ed4c..df46e5cb 100644 --- a/pkg/controllers/follower/util_test.go +++ b/pkg/controllers/follower/util_test.go @@ -256,7 +256,7 @@ func TestGetFollowersFromPod(t *testing.T) { } expectedNamesByGK := map[schema.GroupKind]sets.Set[string]{ - {Group: "kubeadmiral.io", Kind: "FederatedConfigMap"}: sets.New( + {Group: "", Kind: "ConfigMap"}: sets.New( "Spec.Containers[*].EnvFrom[*].ConfigMapRef", "Spec.Containers[*].Env[*].ValueFrom.ConfigMapKeyRef", "Spec.EphemeralContainers[*].EphemeralContainerCommon.EnvFrom[*].ConfigMapRef", @@ -266,7 +266,7 @@ func TestGetFollowersFromPod(t *testing.T) { "Spec.Volumes[*].VolumeSource.Projected.Sources[*].ConfigMap", "Spec.Volumes[*].VolumeSource.ConfigMap", ), - {Group: "kubeadmiral.io", Kind: "FederatedSecret"}: sets.New( + {Group: "", Kind: "Secret"}: sets.New( "Spec.Containers[*].EnvFrom[*].SecretRef", "Spec.Containers[*].Env[*].ValueFrom.SecretKeyRef", "Spec.EphemeralContainers[*].EphemeralContainerCommon.EnvFrom[*].SecretRef", @@ -287,20 +287,14 @@ func TestGetFollowersFromPod(t *testing.T) { "Spec.Volumes[*].VolumeSource.StorageOS.SecretRef", "Spec.Volumes[*].VolumeSource.CSI.NodePublishSecretRef", ), - {Group: "kubeadmiral.io", Kind: "FederatedPersistentVolumeClaim"}: sets.New( + {Group: "", Kind: "PersistentVolumeClaim"}: sets.New( "Spec.Volumes[*].VolumeSource.PersistentVolumeClaim", ), - {Group: "kubeadmiral.io", Kind: "FederatedServiceAccount"}: sets.New( + {Group: "", Kind: "ServiceAccount"}: sets.New( "Spec.ServiceAccountName", ), } - sourceToFederatedGKMap := map[schema.GroupKind]schema.GroupKind{ - {Kind: "ConfigMap"}: {Group: "kubeadmiral.io", Kind: "FederatedConfigMap"}, - {Kind: "Secret"}: {Group: "kubeadmiral.io", Kind: "FederatedSecret"}, - {Kind: "PersistentVolumeClaim"}: {Group: "kubeadmiral.io", Kind: "FederatedPersistentVolumeClaim"}, - {Kind: "ServiceAccount"}: {Group: "kubeadmiral.io", Kind: "FederatedServiceAccount"}, - } namespace := "default" expectedFollowers := sets.New[FollowerReference]() @@ -314,7 +308,7 @@ func TestGetFollowersFromPod(t *testing.T) { } } - followers := getFollowersFromPod("default", &pod, sourceToFederatedGKMap) + followers := getFollowersFromPod("default", &pod) assert := assert.New(t) assert.Equal(expectedFollowers, followers) From 099bdc3ab7d9439f1672e446ed546c9158f562e0 Mon Sep 17 00:00:00 2001 From: "zhangxinjie.next" Date: Fri, 14 Jul 2023 18:21:08 +0800 Subject: [PATCH 170/173] refactor(auto-migration): unify types and support pod trigger --- .../app/controllermanager.go | 2 + cmd/controller-manager/app/core.go | 26 ++ .../extensions_federatedtypeconfig.go | 4 + pkg/controllers/automigration/controller.go | 408 +++++++++++++----- .../automigration/plugins/deployments.go | 56 ++- .../automigration/plugins/plugins.go | 27 +- pkg/controllers/automigration/plugins/util.go | 66 +++ pkg/controllers/automigration/util.go | 52 ++- pkg/controllers/automigration/util_test.go | 68 +++ pkg/controllers/common/constants.go | 5 + .../federatedinformermanager.go | 25 ++ pkg/util/informermanager/interface.go | 48 +++ pkg/util/informermanager/podinformer.go | 15 +- 13 files changed, 640 insertions(+), 162 deletions(-) create mode 100644 pkg/controllers/automigration/plugins/util.go diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index cc8f3c13..4265c9dd 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ 
b/cmd/controller-manager/app/controllermanager.go @@ -44,6 +44,7 @@ const ( StatusControllerName = "status" SchedulerName = "scheduler" SyncControllerName = "sync" + AutoMigrationControllerName = "auto-migration" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ @@ -56,6 +57,7 @@ var knownControllers = map[string]controllermanager.StartControllerFunc{ SchedulerName: startScheduler, SyncControllerName: startSyncController, FollowerControllerName: startFollowerController, + AutoMigrationControllerName: startAutoMigrationController, } var controllersDisabledByDefault = sets.New[string]() diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index 04466923..c648a103 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -23,6 +23,7 @@ import ( "k8s.io/klog/v2" "github.com/kubewharf/kubeadmiral/pkg/controllermanager" + "github.com/kubewharf/kubeadmiral/pkg/controllers/automigration" controllercontext "github.com/kubewharf/kubeadmiral/pkg/controllers/context" "github.com/kubewharf/kubeadmiral/pkg/controllers/federate" "github.com/kubewharf/kubeadmiral/pkg/controllers/federatedcluster" @@ -261,3 +262,28 @@ func startFollowerController( return followerController, nil } + +func startAutoMigrationController( + ctx context.Context, + controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { + autoMigrationController, err := automigration.NewAutoMigrationController( + ctx, + controllerCtx.KubeClientset, + controllerCtx.FedClientset, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedObjects(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + controllerCtx.FederatedInformerManager, + controllerCtx.InformerManager, + controllerCtx.Metrics, + klog.Background(), + controllerCtx.WorkerCount, + ) + if err != nil { + return nil, fmt.Errorf("error creating auto-migration controller: %w", err) + } + + go autoMigrationController.Run(ctx) + + return autoMigrationController, nil +} diff --git a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go index 486f8766..41e2b931 100644 --- a/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go +++ b/pkg/apis/core/v1alpha1/extensions_federatedtypeconfig.go @@ -72,6 +72,10 @@ func (f *FederatedTypeConfig) GetStatusAggregationEnabled() bool { return f.Spec.StatusAggregation != nil && f.Spec.StatusAggregation.Enabled } +func (f *FederatedTypeConfig) GetAutoMigrationEnabled() bool { + return f.Spec.AutoMigration != nil && f.Spec.AutoMigration.Enabled +} + func (f *FederatedTypeConfig) GetPolicyRcEnabled() bool { return true // TODO: should this be configurable? } diff --git a/pkg/controllers/automigration/controller.go b/pkg/controllers/automigration/controller.go index bc08257c..d066bf4d 100644 --- a/pkg/controllers/automigration/controller.go +++ b/pkg/controllers/automigration/controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
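// Aside: GetAutoMigrationEnabled (added above) follows the same nil-safe pattern as the
// other FTC feature switches: the feature is on only if the AutoMigration block exists
// and is enabled. The controller below relies on this (via getFTCIfAutoMigrationIsEnabled)
// to drop events for types with the feature off. A minimal illustrative guard, where
// lookupFTC is a hypothetical helper:
//
//	if ftc := lookupFTC(gvk); ftc == nil || !ftc.GetAutoMigrationEnabled() {
//		return // ignore events for this type
//	}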
@@ -28,29 +27,32 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - pkgruntime "k8s.io/apimachinery/pkg/runtime" - dynamicclient "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" + "k8s.io/apimachinery/pkg/runtime/schema" kubeclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/automigration/plugins" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/framework" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" - utilunstructured "github.com/kubewharf/kubeadmiral/pkg/controllers/util/unstructured" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/managedlabel" + "github.com/kubewharf/kubeadmiral/pkg/util/naming" + utilunstructured "github.com/kubewharf/kubeadmiral/pkg/util/unstructured" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( EventReasonAutoMigrationInfoUpdated = "AutoMigrationInfoUpdated" + AutoMigrationControllerName = "auto-migration" ) /* @@ -65,15 +67,16 @@ One way to prevent both is: */ type Controller struct { - typeConfig *fedcorev1a1.FederatedTypeConfig - name string + name string - federatedObjectClient dynamicclient.NamespaceableResourceInterface - federatedObjectInformer informers.GenericInformer + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer + federatedInformer informermanager.FederatedInformerManager + ftcManager informermanager.FederatedTypeConfigManager - federatedInformer util.FederatedInformer + fedClient fedclient.Interface - worker worker.ReconcileWorker + worker worker.ReconcileWorker[common.QualifiedName] eventRecorder record.EventRecorder @@ -87,85 +90,157 @@ func (c *Controller) IsControllerReady() bool { } func NewAutoMigrationController( - controllerConfig *util.ControllerConfig, - typeConfig *fedcorev1a1.FederatedTypeConfig, - genericFedClient generic.Client, + ctx context.Context, kubeClient kubeclient.Interface, - federatedObjectClient dynamicclient.NamespaceableResourceInterface, - federatedObjectInformer informers.GenericInformer, + fedClient fedclient.Interface, + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, + federatedInformer informermanager.FederatedInformerManager, + ftcManager informermanager.FederatedTypeConfigManager, + metrics stats.Metrics, + logger klog.Logger, + workerCount int, ) (*Controller, error) { - controllerName := 
fmt.Sprintf("%s-auto-migration", typeConfig.Name) - c := &Controller{ - typeConfig: typeConfig, - name: controllerName, + name: AutoMigrationControllerName, + + fedObjectInformer: fedObjectInformer, + clusterFedObjectInformer: clusterFedObjectInformer, + federatedInformer: federatedInformer, + ftcManager: ftcManager, - federatedObjectClient: federatedObjectClient, - federatedObjectInformer: federatedObjectInformer, + fedClient: fedClient, - metrics: controllerConfig.Metrics, - logger: klog.NewKlogr().WithValues("controller", "auto-migration", "ftc", typeConfig.Name), - eventRecorder: eventsink.NewDefederatingRecorderMux(kubeClient, controllerName, 6), + metrics: metrics, + logger: logger.WithValues("controller", AutoMigrationControllerName), + eventRecorder: eventsink.NewDefederatingRecorderMux(kubeClient, AutoMigrationControllerName, 6), } - c.worker = worker.NewReconcileWorker( + c.worker = worker.NewReconcileWorker[common.QualifiedName]( + AutoMigrationControllerName, c.reconcile, worker.RateLimiterOptions{}, - controllerConfig.WorkerCount, - controllerConfig.Metrics, - delayingdeliver.NewMetricTags("auto-migration-worker", c.typeConfig.GetFederatedType().Kind), + workerCount, + metrics, ) - federatedObjectInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + federatedObjectHandler := cache.ResourceEventHandlerFuncs{ // Only need to handle UnschedulableThreshold updates // Addition and deletion will be triggered by the target resources. - UpdateFunc: func(oldUntyped, newUntyped interface{}) { - oldObj, newObj := oldUntyped.(*unstructured.Unstructured), newUntyped.(*unstructured.Unstructured) + UpdateFunc: func(oldFedObj, newFedObj interface{}) { + oldObj, newObj := oldFedObj.(fedcorev1a1.GenericFederatedObject), newFedObj.(fedcorev1a1.GenericFederatedObject) oldThreshold := oldObj.GetAnnotations()[common.PodUnschedulableThresholdAnnotation] newThreshold := newObj.GetAnnotations()[common.PodUnschedulableThresholdAnnotation] if oldThreshold != newThreshold { c.worker.Enqueue(common.NewQualifiedName(newObj)) } }, + } + if _, err := c.fedObjectInformer.Informer().AddEventHandler(federatedObjectHandler); err != nil { + return nil, fmt.Errorf("failed to create federated informer: %w", err) + } + if _, err := c.clusterFedObjectInformer.Informer().AddEventHandler(federatedObjectHandler); err != nil { + return nil, fmt.Errorf("failed to create cluster federated informer: %w", err) + } + + c.federatedInformer.AddPodEventHandler(&informermanager.ResourceEventHandlerWithClusterFuncs{ + UpdateFunc: func(oldObj, newObj interface{}, cluster string) { + ctx := klog.NewContext(ctx, c.logger) + ctx, logger := logging.InjectLoggerValues(ctx, "cluster", cluster) + + newPod := newObj.(*corev1.Pod) + if newPod.GetDeletionTimestamp() != nil { + return + } + oldPod := oldObj.(*corev1.Pod) + if !podScheduledConditionChanged(oldPod, newPod) { + return + } + + qualifiedNames, err := c.getPossibleSourceObjectsFromCluster(ctx, newPod, cluster) + if err != nil { + logger.V(3).Info( + "Failed to get possible source objects form pod", + "pod", common.NewQualifiedName(newPod), + "err", err, + ) + return + } + for _, qualifiedName := range qualifiedNames { + // enqueue with a delay to simulate a rudimentary rate limiter + c.worker.EnqueueWithDelay(qualifiedName, 10*time.Second) + } + }, }) - var err error - targetType := typeConfig.GetTargetType() - c.federatedInformer, err = util.NewFederatedInformer( - controllerConfig, - genericFedClient, - controllerConfig.KubeConfig, - &targetType, - func(o 
pkgruntime.Object) {
-			// enqueue with a delay to simulate a rudimentary rate limiter
-			c.worker.EnqueueWithDelay(common.NewQualifiedName(o), 10*time.Second)
+	objectHandler := func(obj interface{}) {
+		// Enqueue the federated object that corresponds to this target object.
+		targetObj := obj.(*unstructured.Unstructured)
+		if !targetObj.GetDeletionTimestamp().IsZero() {
+			return
+		}
+		gvk := targetObj.GroupVersionKind()
+		ftc := c.getFTCIfAutoMigrationIsEnabled(gvk)
+		if ftc == nil {
+			c.logger.V(3).Info("Auto migration is disabled", "gvk", gvk)
+			return
+		}
+		c.worker.EnqueueWithDelay(common.QualifiedName{
+			Namespace: targetObj.GetNamespace(),
+			Name:      naming.GenerateFederatedObjectName(targetObj.GetName(), ftc.Name),
+		}, 10*time.Second)
+	}
+	if err := c.federatedInformer.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{
+		Predicate: informermanager.RegisterOncePredicate,
+		Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler {
+			// EventHandler for target obj
+			return cache.ResourceEventHandlerFuncs{
+				AddFunc: func(obj interface{}) {
+					objectHandler(obj)
+				},
+				UpdateFunc: func(oldObj, newObj interface{}) {
+					objectHandler(newObj)
+				},
+				DeleteFunc: func(obj interface{}) {
+					if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+						obj = tombstone.Obj
+						if obj == nil {
+							return
+						}
+					}
+					objectHandler(obj)
+				},
+			}
 		},
-		&util.ClusterLifecycleHandlerFuncs{},
-	)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create federated informer: %w", err)
+	}); err != nil {
+		return nil, fmt.Errorf("failed to create target object informer: %w", err)
 	}
 
 	return c, nil
 }
 
 func (c *Controller) Run(ctx context.Context) {
-	c.logger.Info("Starting controller")
-	defer c.logger.Info("Stopping controller")
+	ctx, logger := logging.InjectLogger(ctx, c.logger)
 
-	c.federatedInformer.Start()
-	defer c.federatedInformer.Stop()
+	logger.Info("Starting controller")
+	defer logger.Info("Stopping controller")
 
-	if !cache.WaitForNamedCacheSync(c.name, ctx.Done(), c.HasSynced) {
+	if !cache.WaitForNamedCacheSync(AutoMigrationControllerName, ctx.Done(), c.HasSynced) {
+		logger.Error(nil, "Timed out waiting for cache sync")
 		return
 	}
-	c.worker.Run(ctx.Done())
+	logger.Info("Caches are synced")
+
+	c.worker.Run(ctx)
 	<-ctx.Done()
 }
 
 func (c *Controller) HasSynced() bool {
-	if !c.federatedObjectInformer.Informer().HasSynced() || !c.federatedInformer.ClustersSynced() {
+	if !c.fedObjectInformer.Informer().HasSynced() ||
+		!c.clusterFedObjectInformer.Informer().HasSynced() ||
+		!c.federatedInformer.HasSynced() ||
+		!c.ftcManager.HasSynced() {
 		return false
 	}
@@ -176,39 +251,39 @@ func (c *Controller) HasSynced() bool {
 	return true
 }
 
-func (c *Controller) reconcile(qualifiedName common.QualifiedName) (status worker.Result) {
+func (c *Controller) reconcile(ctx context.Context, qualifiedName common.QualifiedName) (status worker.Result) {
 	key := qualifiedName.String()
-	keyedLogger := c.logger.WithValues("control-loop", "reconcile", "object", key)
-	ctx := klog.NewContext(context.TODO(), keyedLogger)
+	ctx, keyedLogger := logging.InjectLoggerValues(ctx, "control-loop", "reconcile", "object", key)
 	startTime := time.Now()
 
 	c.metrics.Rate("auto-migration.throughput", 1)
 	keyedLogger.V(3).Info("Start reconcile")
 	defer func() {
 		c.metrics.Duration(fmt.Sprintf("%s.latency", c.name), startTime)
-		keyedLogger.V(3).Info("Finished reconcile", "duration", time.Since(startTime), "status", status.String())
+		keyedLogger.V(3).Info("Finished reconcile", "duration", time.Since(startTime), "status", status)
 	}()
 
-	fedObject, err :=
util.UnstructuredFromStore(c.federatedObjectInformer.Informer().GetStore(), key) + fedObject, err := fedobjectadapters.GetFromLister( + c.fedObjectInformer.Lister(), + c.clusterFedObjectInformer.Lister(), + qualifiedName.Namespace, + qualifiedName.Name, + ) if err != nil { - keyedLogger.Error(err, "Failed to get object from store") + keyedLogger.Error(err, "Failed to get federated object from store") return worker.StatusError } if fedObject == nil || fedObject.GetDeletionTimestamp() != nil { return worker.StatusAllOK } + fedObject = fedObject.DeepCopyGenericFederatedObject() - // PodUnschedulableThresholdAnnotation is set by the scheduler. Its presence determines whether auto migration is enabled. annotations := fedObject.GetAnnotations() - var unschedulableThreshold *time.Duration - if value, exists := annotations[common.PodUnschedulableThresholdAnnotation]; exists { - if duration, err := time.ParseDuration(value); err != nil { - keyedLogger.Error(err, "Failed to parse PodUnschedulableThresholdAnnotation") - } else { - unschedulableThreshold = &duration - } + clusterObjs, ftc, unschedulableThreshold, err := c.getTargetObjectsIfAutoMigrationEnabled(fedObject) + if err != nil { + keyedLogger.Error(err, "Failed to get objects from federated informer stores") + return worker.StatusError } - // auto-migration controller sets AutoMigrationAnnotation to // feedback auto-migration information back to the scheduler @@ -226,13 +301,7 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) (status worke } else { // Keep the annotation up-to-date if auto migration is enabled. keyedLogger.V(3).Info("Auto migration is enabled") - clusterObjs, err := c.federatedInformer.GetTargetStore().GetFromAllClusters(key) - if err != nil { - keyedLogger.Error(err, "Failed to get objects from federated informer stores") - return worker.StatusError - } - - estimatedCapacity, result = c.estimateCapacity(ctx, clusterObjs, *unschedulableThreshold) + estimatedCapacity, result = c.estimateCapacity(ctx, ftc, clusterObjs, *unschedulableThreshold) autoMigrationInfo := &framework.AutoMigrationInfo{EstimatedCapacity: estimatedCapacity} // Compare with the existing autoMigration annotation @@ -258,14 +327,15 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) (status worke keyedLogger.V(3).Info("Observed migration information", "estimatedCapacity", estimatedCapacity) if needsUpdate { - fedObject = fedObject.DeepCopy() fedObject.SetAnnotations(annotations) keyedLogger.V(1).Info("Updating federated object with auto migration information", "estimatedCapacity", estimatedCapacity) - _, err = c.federatedObjectClient. - Namespace(qualifiedName.Namespace). 
- Update(ctx, fedObject, metav1.UpdateOptions{}) - if err != nil { + if _, err = fedobjectadapters.Update( + ctx, + c.fedClient.CoreV1alpha1(), + fedObject, + metav1.UpdateOptions{}, + ); err != nil { keyedLogger.Error(err, "Failed to update federated object for auto migration") if apierrors.IsConflict(err) { return worker.StatusConflict @@ -292,38 +362,35 @@ func (c *Controller) reconcile(qualifiedName common.QualifiedName) (status worke func (c *Controller) estimateCapacity( ctx context.Context, - clusterObjs []util.FederatedObject, + typeConfig *fedcorev1a1.FederatedTypeConfig, + clusterObjs []FederatedObject, unschedulableThreshold time.Duration, ) (map[string]int64, *worker.Result) { - keyedLogger := klog.FromContext(ctx) needsBackoff := false var retryAfter *time.Duration estimatedCapacity := make(map[string]int64, len(clusterObjs)) for _, clusterObj := range clusterObjs { - keyedLogger := keyedLogger.WithValues("cluster", clusterObj.ClusterName) - ctx := klog.NewContext(ctx, keyedLogger) - - unsClusterObj := clusterObj.Object.(*unstructured.Unstructured) + ctx, logger := logging.InjectLoggerValues(ctx, "cluster", clusterObj.ClusterName, "ftc", typeConfig.Name) // This is an optimization to skip pod listing when there are no unschedulable pods. - totalReplicas, readyReplicas, err := c.getTotalAndReadyReplicas(unsClusterObj) + totalReplicas, readyReplicas, err := c.getTotalAndReadyReplicas(typeConfig, clusterObj.Object) if err == nil && totalReplicas == readyReplicas { - keyedLogger.V(3).Info("No unschedulable pods found, skip estimating capacity") + logger.V(3).Info("No unschedulable pods found, skip estimating capacity") continue } - desiredReplicas, err := c.getDesiredReplicas(unsClusterObj) + desiredReplicas, err := c.getDesiredReplicas(typeConfig, clusterObj.Object) if err != nil { - keyedLogger.Error(err, "Failed to get desired replicas from object") + logger.Error(err, "Failed to get desired replicas from object") continue } - keyedLogger.V(2).Info("Getting pods from cluster") - pods, clusterNeedsBackoff, err := c.getPodsFromCluster(ctx, unsClusterObj, clusterObj.ClusterName) + logger.V(2).Info("Getting pods from cluster") + pods, clusterNeedsBackoff, err := c.getPodsFromCluster(ctx, typeConfig, clusterObj.Object, clusterObj.ClusterName) if err != nil { - keyedLogger.Error(err, "Failed to get pods from cluster") + logger.Error(err, "Failed to get pods from cluster") if clusterNeedsBackoff { needsBackoff = true } @@ -331,6 +398,15 @@ func (c *Controller) estimateCapacity( } unschedulable, nextCrossIn := countUnschedulablePods(pods, time.Now(), unschedulableThreshold) + logger.V(2).Info("Analyzed pods", + "total", len(pods), + "desired", desiredReplicas, + "unschedulable", unschedulable, + ) + + if nextCrossIn != nil && (retryAfter == nil || *nextCrossIn < *retryAfter) { + retryAfter = nextCrossIn + } var clusterEstimatedCapacity int64 if len(pods) >= int(desiredReplicas) { @@ -354,16 +430,6 @@ func (c *Controller) estimateCapacity( } estimatedCapacity[clusterObj.ClusterName] = clusterEstimatedCapacity - - keyedLogger.V(2).Info("Analyzed pods", - "total", len(pods), - "desired", desiredReplicas, - "unschedulable", unschedulable, - ) - - if nextCrossIn != nil && (retryAfter == nil || *nextCrossIn < *retryAfter) { - retryAfter = nextCrossIn - } } var result *worker.Result @@ -378,13 +444,14 @@ func (c *Controller) estimateCapacity( } func (c *Controller) getTotalAndReadyReplicas( + typeConfig *fedcorev1a1.FederatedTypeConfig, unsObj *unstructured.Unstructured, ) (int64, int64, 
error) { // These values might not have been populated by the controller, in which case we default to 0 totalReplicas := int64(0) if replicasPtr, err := utilunstructured.GetInt64FromPath( - unsObj, c.typeConfig.Spec.PathDefinition.ReplicasStatus, nil, + unsObj, typeConfig.Spec.PathDefinition.ReplicasStatus, nil, ); err != nil { return 0, 0, fmt.Errorf("replicas: %w", err) } else if replicasPtr != nil { @@ -393,7 +460,7 @@ func (c *Controller) getTotalAndReadyReplicas( readyReplicas := int64(0) if readyReplicasPtr, err := utilunstructured.GetInt64FromPath( - unsObj, c.typeConfig.Spec.PathDefinition.ReadyReplicasStatus, nil, + unsObj, typeConfig.Spec.PathDefinition.ReadyReplicasStatus, nil, ); err != nil { return 0, 0, fmt.Errorf("ready replicas: %w", err) } else if readyReplicasPtr != nil { @@ -403,12 +470,15 @@ return totalReplicas, readyReplicas, nil } -func (c *Controller) getDesiredReplicas(unsObj *unstructured.Unstructured) (int64, error) { - desiredReplicas, err := utilunstructured.GetInt64FromPath(unsObj, c.typeConfig.Spec.PathDefinition.ReplicasSpec, nil) +func (c *Controller) getDesiredReplicas( + typeConfig *fedcorev1a1.FederatedTypeConfig, + unsObj *unstructured.Unstructured, +) (int64, error) { + desiredReplicas, err := utilunstructured.GetInt64FromPath(unsObj, typeConfig.Spec.PathDefinition.ReplicasSpec, nil) if err != nil { return 0, fmt.Errorf("desired replicas: %w", err) } else if desiredReplicas == nil { - return 0, fmt.Errorf("no desired replicas at %s", c.typeConfig.Spec.PathDefinition.ReplicasSpec) + return 0, fmt.Errorf("no desired replicas at %s", typeConfig.Spec.PathDefinition.ReplicasSpec) } return *desiredReplicas, nil @@ -416,16 +486,17 @@ func (c *Controller) getDesiredReplicas(unsObj *unstructured.Unstructured) (int6 func (c *Controller) getPodsFromCluster( ctx context.Context, + typeConfig *fedcorev1a1.FederatedTypeConfig, unsClusterObj *unstructured.Unstructured, clusterName string, ) ([]*corev1.Pod, bool, error) { - plugin, err := plugins.ResolvePlugin(c.typeConfig) + plugin, err := plugins.ResolvePlugin(typeConfig.GetSourceTypeGVK()) if err != nil { return nil, false, fmt.Errorf("failed to get plugin for FTC: %w", err) } - client, err := c.federatedInformer.GetClientForCluster(clusterName) - if err != nil { + client, exist := c.federatedInformer.GetClusterDynamicClient(clusterName) + if !exist { + // err is always nil here, so report the missing client directly instead of wrapping it + return nil, true, fmt.Errorf("failed to get client for cluster %s", clusterName) } @@ -438,3 +509,108 @@ return pods, false, nil } + +func (c *Controller) getPossibleSourceObjectsFromCluster( + ctx context.Context, + pod *corev1.Pod, + clusterName string, +) (possibleQualifies []common.QualifiedName, err error) { + client, exist := c.federatedInformer.GetClusterDynamicClient(clusterName) + if !exist { + return nil, fmt.Errorf("failed to get client for cluster %s", clusterName) + } + + for gvk, plugin := range plugins.NativePlugins { + ctx, logger := logging.InjectLoggerValues(ctx, "gvk", gvk) + ftc := c.getFTCIfAutoMigrationIsEnabled(gvk) + if ftc == nil { + continue + } + object, found, err := plugin.GetTargetObjectFromPod(ctx, pod.DeepCopy(), plugins.ClusterHandle{ + Client: client, + }) + if err != nil || !found { + logger.V(3).Info( + "Failed to get target object from pod", + "found", found, + "err", err, + ) + continue + } + managed := object.GetLabels()[managedlabel.ManagedByKubeAdmiralLabelKey] == managedlabel.ManagedByKubeAdmiralLabelValue + gkMatched := 
object.GroupVersionKind().GroupKind() == gvk.GroupKind() + if !managed || !gkMatched { + c.logger.V(3).Info( + "Target object is not managed by KubeAdmiral or does not match the expected GVK", + "got-gvk", object.GroupVersionKind(), + "managed", managed, + "resource", common.NewQualifiedName(object), + ) + continue + } + possibleQualifies = append(possibleQualifies, common.QualifiedName{ + Namespace: object.GetNamespace(), + Name: naming.GenerateFederatedObjectName(object.GetName(), ftc.Name), + }) + } + return possibleQualifies, nil +} + +func (c *Controller) getTargetObjectsIfAutoMigrationEnabled( + fedObject fedcorev1a1.GenericFederatedObject, +) (clusterObjs []FederatedObject, ftc *fedcorev1a1.FederatedTypeConfig, unschedulableThreshold *time.Duration, err error) { + // PodUnschedulableThresholdAnnotation is set by the scheduler. Its presence determines whether auto migration is enabled. + if value, exists := fedObject.GetAnnotations()[common.PodUnschedulableThresholdAnnotation]; exists { + if duration, err := time.ParseDuration(value); err != nil { + err = fmt.Errorf("failed to parse PodUnschedulableThresholdAnnotation: %w", err) + return nil, nil, nil, err + } else { + unschedulableThreshold = &duration + } + } + + objectMeta := &metav1.PartialObjectMetadata{} + if err = json.Unmarshal(fedObject.GetSpec().Template.Raw, objectMeta); err != nil { + err = fmt.Errorf("failed to unmarshal template of federated object: %w", err) + return nil, nil, nil, err + } + gvk := objectMeta.GroupVersionKind() + + ftc = c.getFTCIfAutoMigrationIsEnabled(gvk) + if ftc == nil { + return nil, nil, nil, nil + } + + for _, placement := range fedObject.GetSpec().Placements { + for _, cluster := range placement.Placement { + lister, synced, exist := c.federatedInformer.GetResourceLister(gvk, cluster.Cluster) + if !exist || !synced() { + err = fmt.Errorf("informer for resource %v does not exist or is not synced for cluster %s", gvk, cluster.Cluster) + return nil, nil, nil, err + } + object, err := lister.ByNamespace(objectMeta.Namespace).Get(objectMeta.Name) + if err != nil && !apierrors.IsNotFound(err) { + err = fmt.Errorf("failed to get %v from informer stores for cluster %s: %w", objectMeta, cluster.Cluster, err) + return nil, nil, nil, err + } + if apierrors.IsNotFound(err) { + continue + } + unsObj, ok := object.(*unstructured.Unstructured) + if !ok { + continue + } + clusterObjs = append(clusterObjs, FederatedObject{Object: unsObj, ClusterName: cluster.Cluster}) + } + } + return clusterObjs, ftc, unschedulableThreshold, nil +} + +func (c *Controller) getFTCIfAutoMigrationIsEnabled(gvk schema.GroupVersionKind) *fedcorev1a1.FederatedTypeConfig { + typeConfig, exists := c.ftcManager.GetResourceFTC(gvk) + if !exists || typeConfig == nil || !typeConfig.GetAutoMigrationEnabled() { + return nil + } + + return typeConfig +} diff --git a/pkg/controllers/automigration/plugins/deployments.go b/pkg/controllers/automigration/plugins/deployments.go index 5e3a6b8b..21768842 100644 --- a/pkg/controllers/automigration/plugins/deployments.go +++ b/pkg/controllers/automigration/plugins/deployments.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
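As a reading aid for the enqueue paths above (a minimal sketch, not part of the patch; it assumes only helpers that appear verbatim in this series):

package sketch

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
	"github.com/kubewharf/kubeadmiral/pkg/util/naming"
)

// queueKeyFor maps a target object observed in a member cluster to the
// qualified name of the federated object that manages it; the federated
// object's name is derived from the source name and the FTC name.
func queueKeyFor(targetObj *unstructured.Unstructured, ftc *fedcorev1a1.FederatedTypeConfig) common.QualifiedName {
	return common.QualifiedName{
		Namespace: targetObj.GetNamespace(),
		Name:      naming.GenerateFederatedObjectName(targetObj.GetName(), ftc.Name),
	}
}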
@@ -25,8 +24,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/kubewharf/kubeadmiral/pkg/controllers/common" deploymentutil "github.com/kubewharf/kubeadmiral/pkg/lifted/kubernetes/pkg/controller/deployment/util" ) @@ -44,11 +43,16 @@ func (*deploymentPlugin) GetPodsForClusterObject( rsList, err := deploymentutil.ListReplicaSets(deployment, func(ns string, opts metav1.ListOptions) ([]*appsv1.ReplicaSet, error) { rsList := &appsv1.ReplicaSetList{} - listOpts, err := convertListOptions(ns, &opts) + opts = *opts.DeepCopy() + opts.ResourceVersion = "0" // list from watch cache + unsRsList, err := handle.Client. + Resource(common.ReplicaSetGVR). + Namespace(ns). + List(ctx, opts) if err != nil { return nil, err } - if err := handle.Client.ListWithOptions(ctx, rsList, listOpts); err != nil { + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(unsRsList.UnstructuredContent(), rsList); err != nil { return nil, err } ret := []*appsv1.ReplicaSet{} @@ -71,11 +75,16 @@ func (*deploymentPlugin) GetPodsForClusterObject( []*appsv1.ReplicaSet{newRS}, func(ns string, opts metav1.ListOptions) (*corev1.PodList, error) { podList := &corev1.PodList{} - listOpts, err := convertListOptions(ns, &opts) + opts = *opts.DeepCopy() + opts.ResourceVersion = "0" // list from watch cache - if err != nil { - return nil, err - } - if err := handle.Client.ListWithOptions(ctx, podList, listOpts); err != nil { + unsPodList, err := handle.Client. + Resource(common.PodGVR). + Namespace(ns). + List(ctx, opts) + if err != nil { + return nil, err + } + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(unsPodList.UnstructuredContent(), podList); err != nil { return nil, err } return podList, nil @@ -93,15 +105,27 @@ func (*deploymentPlugin) GetPodsForClusterObject( return ret, nil } -var _ Plugin = &deploymentPlugin{} - -func convertListOptions(ns string, opts *metav1.ListOptions) (*client.ListOptions, error) { - opts = opts.DeepCopy() - // list from watch cache - opts.ResourceVersion = "0" +func (*deploymentPlugin) GetTargetObjectFromPod( + ctx context.Context, + pod *corev1.Pod, + handle ClusterHandle, +) (obj *unstructured.Unstructured, found bool, err error) { + rs, found, err := GetSpecifiedOwnerFromObj(ctx, handle.Client, pod, metav1.APIResource{ + Name: "replicasets", + Group: appsv1.GroupName, + Version: "v1", + Kind: common.ReplicaSetKind, + }) + if err != nil || !found { + return nil, false, err + } - return &client.ListOptions{ - Namespace: ns, - Raw: opts, - }, nil + return GetSpecifiedOwnerFromObj(ctx, handle.Client, rs, metav1.APIResource{ + Name: "deployments", + Group: appsv1.GroupName, + Version: "v1", + Kind: common.DeploymentKind, + }) } + +var _ Plugin = &deploymentPlugin{} diff --git a/pkg/controllers/automigration/plugins/plugins.go b/pkg/controllers/automigration/plugins/plugins.go index 4699709f..8143e4ec 100644 --- a/pkg/controllers/automigration/plugins/plugins.go +++ b/pkg/controllers/automigration/plugins/plugins.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
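The listing pattern used in both hunks above, shown in isolation (a hedged sketch, not part of the patch; the GVR is spelled out instead of the common.* constants, and UnstructuredContent() is used so the converted list keeps its items):

package sketch

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// listReplicaSets lists ReplicaSets through a dynamic client and converts the
// result to a typed list. ResourceVersion "0" asks the apiserver to answer
// from its watch cache rather than quorum-reading etcd.
func listReplicaSets(ctx context.Context, client dynamic.Interface, ns string) (*appsv1.ReplicaSetList, error) {
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}
	unsList, err := client.Resource(gvr).Namespace(ns).List(ctx, metav1.ListOptions{ResourceVersion: "0"})
	if err != nil {
		return nil, err
	}
	rsList := &appsv1.ReplicaSetList{}
	// UnstructuredContent() includes the "items" field; the bare Object map does not.
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unsList.UnstructuredContent(), rsList); err != nil {
		return nil, err
	}
	return rsList, nil
}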
@@ -21,18 +20,17 @@ import ( "context" "fmt" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" - fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - "github.com/kubewharf/kubeadmiral/pkg/client/generic" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - schemautil "github.com/kubewharf/kubeadmiral/pkg/controllers/util/schema" ) type ClusterHandle struct { - Client generic.Client + Client dynamic.Interface } type Plugin interface { @@ -41,19 +39,22 @@ type Plugin interface { obj *unstructured.Unstructured, handle ClusterHandle, ) ([]*corev1.Pod, error) -} -var nativePlugins = map[schema.GroupVersionResource]Plugin{ - common.DeploymentGVR: &deploymentPlugin{}, + GetTargetObjectFromPod( + ctx context.Context, + pod *corev1.Pod, + handle ClusterHandle, + ) (obj *unstructured.Unstructured, found bool, err error) } -func ResolvePlugin(typeConfig *fedcorev1a1.FederatedTypeConfig) (Plugin, error) { - targetAPIResource := typeConfig.GetTargetType() - targetGVR := schemautil.APIResourceToGVR(&targetAPIResource) +var NativePlugins = map[schema.GroupVersionKind]Plugin{ + appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind): &deploymentPlugin{}, +} - if plugin, exists := nativePlugins[targetGVR]; exists { +func ResolvePlugin(gvk schema.GroupVersionKind) (Plugin, error) { + if plugin, exists := NativePlugins[gvk]; exists { return plugin, nil } - return nil, fmt.Errorf("unsupported type %s", targetGVR.String()) + return nil, fmt.Errorf("unsupported type %s", gvk.String()) } diff --git a/pkg/controllers/automigration/plugins/util.go b/pkg/controllers/automigration/plugins/util.go new file mode 100644 index 00000000..081ff9a9 --- /dev/null +++ b/pkg/controllers/automigration/plugins/util.go @@ -0,0 +1,66 @@ +/* +Copyright 2023 The KubeAdmiral Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugins + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetSpecifiedOwnerFromObj returns the owner of the given object that matches the given APIResource. 
+func GetSpecifiedOwnerFromObj( + ctx context.Context, + client dynamic.Interface, + obj client.Object, + ownerAPIResource metav1.APIResource, +) (ownerObj *unstructured.Unstructured, found bool, err error) { + gv := schema.GroupVersion{ + Group: ownerAPIResource.Group, + Version: ownerAPIResource.Version, + } + var owner *metav1.OwnerReference + ownerReferences := obj.GetOwnerReferences() + for i, o := range ownerReferences { + if o.APIVersion == gv.String() && o.Kind == ownerAPIResource.Kind { + owner = &ownerReferences[i] + break + } + } + if owner == nil || client == nil { + return nil, false, nil + } + + ownerObj, err = client.Resource(schema.GroupVersionResource{ + Group: ownerAPIResource.Group, + Version: ownerAPIResource.Version, + Resource: ownerAPIResource.Name, + }).Namespace(obj.GetNamespace()).Get(ctx, owner.Name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, false, nil + } + return nil, false, err + } + + return ownerObj, true, nil +} diff --git a/pkg/controllers/automigration/util.go b/pkg/controllers/automigration/util.go index 55a20a94..5eb5970f 100644 --- a/pkg/controllers/automigration/util.go +++ b/pkg/controllers/automigration/util.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -21,6 +20,7 @@ import ( "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) // Returns the number of unschedulable pods that remain @@ -37,17 +37,8 @@ func countUnschedulablePods( continue } - var scheduledCondition *corev1.PodCondition - for i := range pod.Status.Conditions { - condition := &pod.Status.Conditions[i] - if condition.Type == corev1.PodScheduled { - scheduledCondition = condition - break - } - } - if scheduledCondition == nil || - scheduledCondition.Status != corev1.ConditionFalse || - scheduledCondition.Reason != corev1.PodReasonUnschedulable { + scheduledCondition, isUnschedulable := getPodScheduledCondition(pod) + if !isUnschedulable { continue } @@ -63,3 +54,40 @@ return unschedulableCount, nextCrossIn } + +func getPodScheduledCondition(pod *corev1.Pod) (scheduledCondition *corev1.PodCondition, isUnschedulable bool) { + for i := range pod.Status.Conditions { + condition := &pod.Status.Conditions[i] + if condition.Type == corev1.PodScheduled { + scheduledCondition = condition + break + } + } + if scheduledCondition == nil || + scheduledCondition.Status != corev1.ConditionFalse || + scheduledCondition.Reason != corev1.PodReasonUnschedulable { + return scheduledCondition, false + } + return scheduledCondition, true +} + +func podScheduledConditionChanged(oldPod, newPod *corev1.Pod) bool { + condition, _ := getPodScheduledCondition(newPod) + oldCondition, _ := getPodScheduledCondition(oldPod) + if condition == nil || oldCondition == nil { + return condition != oldCondition + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + return !isEqual +} + +// An object together with the name of the member cluster it originates from. 
+type FederatedObject struct { + Object *unstructured.Unstructured + ClusterName string +} diff --git a/pkg/controllers/automigration/util_test.go b/pkg/controllers/automigration/util_test.go index 4adf892b..4ee1be6a 100644 --- a/pkg/controllers/automigration/util_test.go +++ b/pkg/controllers/automigration/util_test.go @@ -120,3 +120,71 @@ func newPod(terminating bool, schedulable bool, lastTransitionTimestamp time.Tim } return pod } + +func Test_podScheduledConditionChanged(t *testing.T) { + now := time.Now() + podWithEmptyCond := newPod(false, false, now) + podWithEmptyCond.Status.Conditions = nil + + tests := []struct { + name string + oldPod *corev1.Pod + newPod *corev1.Pod + want bool + }{ + { + name: "both nil", + oldPod: podWithEmptyCond, + newPod: podWithEmptyCond, + want: false, + }, + { + name: "oldPod condition is nil", + oldPod: podWithEmptyCond, + newPod: newPod(false, false, now), + want: true, + }, + { + name: "newPod condition is nil", + oldPod: newPod(false, false, now), + newPod: podWithEmptyCond, + want: true, + }, + { + name: "unschedulable condition equal", + oldPod: newPod(false, false, now), + newPod: newPod(false, false, now), + want: false, + }, + { + name: "unschedulable condition not equal", + oldPod: newPod(false, false, now.Add(10*time.Second)), + newPod: newPod(false, false, now), + want: true, + }, + { + name: "schedulable condition equal", + oldPod: newPod(false, true, now), + newPod: newPod(false, true, now), + want: false, + }, + { + name: "schedulable condition not equal", + oldPod: newPod(false, true, now.Add(10*time.Second)), + newPod: newPod(false, true, now), + want: true, + }, + { + name: "schedulable and unschedulable conditions", + oldPod: newPod(false, true, now), + newPod: newPod(false, false, now), + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, podScheduledConditionChanged(tt.oldPod, tt.newPod), + "podScheduledConditionChanged(%v, %v)", tt.oldPod, tt.newPod) + }) + } +} diff --git a/pkg/controllers/common/constants.go b/pkg/controllers/common/constants.go index 49404b97..08881f08 100644 --- a/pkg/controllers/common/constants.go +++ b/pkg/controllers/common/constants.go @@ -140,6 +140,8 @@ const ( DaemonSetResource = "daemonsets" ConfigMapResource = "configmaps" SecretResource = "secrets" + PodResource = "pods" + ReplicaSetResource = "replicasets" NamespaceKind = "Namespace" DeploymentKind = "Deployment" @@ -155,6 +157,7 @@ const ( PersistentVolumeKind = "PersistentVolume" PersistentVolumeClaimKind = "PersistentVolumeClaim" PodKind = "Pod" + ReplicaSetKind = "ReplicaSet" ) var ( @@ -171,9 +174,11 @@ var ( NamespaceGVR = corev1.SchemeGroupVersion.WithResource(NamespaceResource) ConfigMapGVR = corev1.SchemeGroupVersion.WithResource(ConfigMapResource) SecretGVR = corev1.SchemeGroupVersion.WithResource(SecretResource) + PodGVR = corev1.SchemeGroupVersion.WithResource(PodResource) DeploymentGVR = appsv1.SchemeGroupVersion.WithResource(DeploymentResource) DaemonSetGVR = appsv1.SchemeGroupVersion.WithResource(DaemonSetResource) + ReplicaSetGVR = appsv1.SchemeGroupVersion.WithResource(ReplicaSetResource) ) // MaxFederatedObjectNameLength defines the max length of a federated object name. 
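For intuition about the threshold logic exercised by countUnschedulablePods and the tests above: a pod only counts as unschedulable once its PodScheduled=False/Unschedulable condition has persisted longer than the configured threshold; until then, nextCrossIn tells the controller how long to wait before re-checking. A standalone sketch using only core API types:

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The pod became unschedulable 30s ago; the threshold is 1m.
	lastTransition := time.Now().Add(-30 * time.Second)
	pod := corev1.Pod{
		Status: corev1.PodStatus{
			Conditions: []corev1.PodCondition{{
				Type:               corev1.PodScheduled,
				Status:             corev1.ConditionFalse,
				Reason:             corev1.PodReasonUnschedulable,
				LastTransitionTime: metav1.NewTime(lastTransition),
			}},
		},
	}
	threshold := time.Minute
	crossesAt := pod.Status.Conditions[0].LastTransitionTime.Add(threshold)
	// Prints roughly "30s": the controller would schedule a retry at that point
	// instead of counting this pod as unschedulable right away.
	fmt.Println(time.Until(crossesAt).Round(time.Second))
}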
diff --git a/pkg/util/informermanager/federatedinformermanager.go b/pkg/util/informermanager/federatedinformermanager.go index 9a5fd79c..131ccc43 100644 --- a/pkg/util/informermanager/federatedinformermanager.go +++ b/pkg/util/informermanager/federatedinformermanager.go @@ -77,6 +77,9 @@ type federatedInformerManager struct { queue workqueue.RateLimitingInterface podListerSemaphore *semaphore.Weighted initialClusters sets.Set[string] + + podEventHandlers []*ResourceEventHandlerWithClusterFuncs + podEventRegistrations map[string]map[*ResourceEventHandlerWithClusterFuncs]cache.ResourceEventHandlerRegistration } func NewFederatedInformerManager( @@ -102,6 +105,8 @@ func NewFederatedInformerManager( queue: workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()), podListerSemaphore: semaphore.NewWeighted(3), // TODO: make this configurable initialClusters: sets.New[string](), + podEventHandlers: []*ResourceEventHandlerWithClusterFuncs{}, + podEventRegistrations: map[string]map[*ResourceEventHandlerWithClusterFuncs]cache.ResourceEventHandlerRegistration{}, } clusterInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ @@ -264,6 +269,7 @@ func (m *federatedInformerManager) processCluster( m.clusterCancelFuncs[clusterName] = cancel m.informerManagers[clusterName] = manager m.informerFactories[clusterName] = factory + m.podEventRegistrations[clusterName] = map[*ResourceEventHandlerWithClusterFuncs]cache.ResourceEventHandlerRegistration{} } if m.initialClusters.Has(cluster.Name) { @@ -276,6 +282,17 @@ func (m *federatedInformerManager) processCluster( } } + registrations := m.podEventRegistrations[clusterName] + factory := m.informerFactories[clusterName] + for _, handler := range m.podEventHandlers { + if registrations[handler] == nil { + copied := handler.copyWithClusterName(clusterName) + if r, err := factory.Core().V1().Pods().Informer().AddEventHandler(copied); err == nil { + registrations[handler] = r + } + } + } + return false, 0, nil } @@ -297,6 +314,7 @@ func (m *federatedInformerManager) processClusterDeletionUnlocked(ctx context.Co delete(m.informerManagers, clusterName) delete(m.informerFactories, clusterName) delete(m.clusterCancelFuncs, clusterName) + delete(m.podEventRegistrations, clusterName) m.initialClusters.Delete(clusterName) @@ -395,6 +413,13 @@ func (m *federatedInformerManager) GetNodeLister( return factory.Core().V1().Nodes().Lister(), factory.Core().V1().Nodes().Informer().HasSynced, true } +func (m *federatedInformerManager) AddPodEventHandler(handler *ResourceEventHandlerWithClusterFuncs) { + m.lock.Lock() + defer m.lock.Unlock() + + m.podEventHandlers = append(m.podEventHandlers, handler) +} + func (m *federatedInformerManager) GetPodLister( cluster string, ) (lister v1.PodLister, informerSynced cache.InformerSynced, exists bool) { diff --git a/pkg/util/informermanager/interface.go b/pkg/util/informermanager/interface.go index 185e8b70..55342d10 100644 --- a/pkg/util/informermanager/interface.go +++ b/pkg/util/informermanager/interface.go @@ -42,6 +42,52 @@ type EventHandlerGenerator struct { Generator func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler } +// ResourceEventHandlerWithClusterFuncs is an adaptor to let you easily specify as many or +// as few of the notification functions as you want while still implementing +// ResourceEventHandler. This adapter does not remove the prohibition against +// modifying the objects. 
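A usage sketch for the adapter defined just below (not part of the patch; manager stands for any FederatedInformerManager wired up as in this series). The manager copies the handler once per member cluster and binds the cluster name into each callback:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"

	"github.com/kubewharf/kubeadmiral/pkg/util/informermanager"
)

func registerPodHandler(manager informermanager.FederatedInformerManager) {
	manager.AddPodEventHandler(&informermanager.ResourceEventHandlerWithClusterFuncs{
		UpdateFunc: func(oldObj, newObj interface{}, cluster string) {
			pod, ok := newObj.(*corev1.Pod)
			if !ok {
				return
			}
			// cluster tells the callback which member cluster emitted the event.
			klog.V(4).InfoS("Pod updated", "cluster", cluster, "pod", pod.Name)
		},
	})
}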
+type ResourceEventHandlerWithClusterFuncs struct { + clusterName string + + AddFunc func(obj interface{}, cluster string) + UpdateFunc func(oldObj, newObj interface{}, cluster string) + DeleteFunc func(obj interface{}, cluster string) +} + +// OnAdd calls AddFunc if it's not nil. +func (p *ResourceEventHandlerWithClusterFuncs) OnAdd(obj interface{}) { + if p.AddFunc != nil { + p.AddFunc(obj, p.clusterName) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (p *ResourceEventHandlerWithClusterFuncs) OnUpdate(oldObj, newObj interface{}) { + if p.UpdateFunc != nil { + p.UpdateFunc(oldObj, newObj, p.clusterName) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (p *ResourceEventHandlerWithClusterFuncs) OnDelete(obj interface{}) { + if p.DeleteFunc != nil { + p.DeleteFunc(obj, p.clusterName) + } +} + +// copyWithClusterName returns a copy of ResourceEventHandlerWithClusterFuncs with the given cluster name +func (p *ResourceEventHandlerWithClusterFuncs) copyWithClusterName( + clusterName string, +) *ResourceEventHandlerWithClusterFuncs { + return &ResourceEventHandlerWithClusterFuncs{ + clusterName: clusterName, + + AddFunc: p.AddFunc, + UpdateFunc: p.UpdateFunc, + DeleteFunc: p.DeleteFunc, + } +} + // FTCUpdateHandler is called by InformerManager each time it finishes processing an FTC. This allows controllers to // hook into the InformerManager's view of an FTC's lifecycle. When a new FTC is observed, lastObserved will be nil. // When a FTC deletion is observed, latest will be nil. @@ -121,6 +167,8 @@ type FederatedInformerManager interface { // Returns a kubernetes client for the given cluster if it exists. The client for each cluster will eventually exist. GetClusterKubeClient(cluster string) (client kubernetes.Interface, exists bool) + // AddPodEventHandler registers the given handler with the pod informer of every member cluster. 
+ AddPodEventHandler(handler *ResourceEventHandlerWithClusterFuncs) GetPodLister(cluster string) (lister corev1listers.PodLister, informerSynced cache.InformerSynced, exists bool) GetNodeLister(cluster string) (lister corev1listers.NodeLister, informerSynced cache.InformerSynced, exists bool) diff --git a/pkg/util/informermanager/podinformer.go b/pkg/util/informermanager/podinformer.go index c49da4b5..d797a7dc 100644 --- a/pkg/util/informermanager/podinformer.go +++ b/pkg/util/informermanager/podinformer.go @@ -125,11 +125,12 @@ func prunePod(pod *corev1.Pod) { } *pod = corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: pod.Name, - Namespace: pod.Namespace, - Generation: pod.Generation, - ResourceVersion: pod.ResourceVersion, - UID: pod.UID, + Name: pod.Name, + Namespace: pod.Namespace, + Generation: pod.Generation, + ResourceVersion: pod.ResourceVersion, + UID: pod.UID, + DeletionTimestamp: pod.DeletionTimestamp, }, Spec: corev1.PodSpec{ NodeName: pod.Spec.NodeName, @@ -137,5 +138,9 @@ func prunePod(pod *corev1.Pod) { Containers: containers, InitContainers: initContainers, }, + Status: corev1.PodStatus{ + Phase: pod.Status.Phase, + Conditions: pod.Status.Conditions, + }, } } From 61294ec3a47e4829424b6e2f1d41449e6e474ca6 Mon Sep 17 00:00:00 2001 From: "zhangxinjie.next" Date: Wed, 19 Jul 2023 19:31:03 +0800 Subject: [PATCH 171/173] refactor(status-aggregator): unified federated type --- .../app/controllermanager.go | 2 + cmd/controller-manager/app/core.go | 28 + .../statusaggregator/controller.go | 517 ++++++++++-------- .../statusaggregator/plugins/deployment.go | 36 +- .../statusaggregator/plugins/job.go | 5 +- .../statusaggregator/plugins/job_test.go | 12 +- .../statusaggregator/plugins/plugin.go | 14 +- .../statusaggregator/plugins/pod.go | 5 +- .../statusaggregator/plugins/pod_test.go | 4 +- .../plugins/single_cluster_plugin.go | 5 +- .../propagationstatus/propagationstatus.go | 37 +- 11 files changed, 373 insertions(+), 292 deletions(-) diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index 4265c9dd..cf6187a9 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -45,6 +45,7 @@ const ( SchedulerName = "scheduler" SyncControllerName = "sync" AutoMigrationControllerName = "auto-migration" + StatusAggregatorControllerName = "status-aggregator" ) var knownControllers = map[string]controllermanager.StartControllerFunc{ @@ -58,6 +59,7 @@ var knownControllers = map[string]controllermanager.StartControllerFunc{ SyncControllerName: startSyncController, FollowerControllerName: startFollowerController, AutoMigrationControllerName: startAutoMigrationController, + StatusAggregatorControllerName: startStatusAggregatorController, } var controllersDisabledByDefault = sets.New[string]() diff --git a/cmd/controller-manager/app/core.go b/cmd/controller-manager/app/core.go index c648a103..490c835f 100644 --- a/cmd/controller-manager/app/core.go +++ b/cmd/controller-manager/app/core.go @@ -33,6 +33,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/policyrc" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler" "github.com/kubewharf/kubeadmiral/pkg/controllers/status" + "github.com/kubewharf/kubeadmiral/pkg/controllers/statusaggregator" "github.com/kubewharf/kubeadmiral/pkg/controllers/sync" ) @@ -287,3 +288,30 @@ func startAutoMigrationController( return autoMigrationController, nil } + +func startStatusAggregatorController( + ctx context.Context, + 
controllerCtx *controllercontext.Context, +) (controllermanager.Controller, error) { + statusAggregatorController, err := statusaggregator.NewStatusAggregatorController( + controllerCtx.KubeClientset, + controllerCtx.DynamicClientset, + controllerCtx.FedClientset, + controllerCtx.FedInformerFactory.Core().V1alpha1().FederatedObjects(), + controllerCtx.FedInformerFactory.Core().V1alpha1().ClusterFederatedObjects(), + controllerCtx.FederatedInformerManager, + controllerCtx.InformerManager, + controllerCtx.Metrics, + klog.Background(), + controllerCtx.WorkerCount, + controllerCtx.ClusterAvailableDelay, + controllerCtx.ClusterUnavailableDelay, + ) + if err != nil { + return nil, fmt.Errorf("error creating status-aggregator controller: %w", err) + } + + go statusAggregatorController.Run(ctx) + + return statusAggregatorController, nil +} diff --git a/pkg/controllers/statusaggregator/controller.go b/pkg/controllers/statusaggregator/controller.go index 51fb032d..d182e4bb 100644 --- a/pkg/controllers/statusaggregator/controller.go +++ b/pkg/controllers/statusaggregator/controller.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -20,64 +19,48 @@ package statusaggregator import ( "context" "fmt" - "strings" "time" - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + dynamicclient "k8s.io/client-go/dynamic" kubeclient "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" - genericclient "github.com/kubewharf/kubeadmiral/pkg/client/generic" - "github.com/kubewharf/kubeadmiral/pkg/controllers/common" + fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned" + fedcorev1a1informers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/statusaggregator/plugins" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/delayingdeliver" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/eventsink" "github.com/kubewharf/kubeadmiral/pkg/controllers/util/propagationstatus" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/sourcefeedback" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/worker" "github.com/kubewharf/kubeadmiral/pkg/stats" + clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" + "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" + "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" + "github.com/kubewharf/kubeadmiral/pkg/util/fedobjectadapters" + "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" + "github.com/kubewharf/kubeadmiral/pkg/util/logging" + "github.com/kubewharf/kubeadmiral/pkg/util/naming" + "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) const ( - ControllerName = "status-aggregator-controller" + StatusAggregatorControllerName = "status-aggregator-controller" - EventReasonUpdateSourceObjectStatus = "UpdateSourceObjectStatus" - EventReasonUpdateSourceObjectAnnotation = "UpdateSourceObjectAnnotation" + EventReasonUpdateSourceObjectStatus = 
"UpdateSourceObjectStatus" ) // StatusAggregator aggregates statuses of target objects in member clusters to status of source object type StatusAggregator struct { - // name of the controller: -status-aggregator name string - // Store for the federated type - federatedStore cache.Store - // Controller for the federated type - federatedController cache.Controller - // Client for federated type - federatedClient util.ResourceClient - - // Store for the source type - sourceStore cache.Store - // Controller for the source type - sourceController cache.Controller - // Client for source type - sourceClient util.ResourceClient - - // Informer for resources in member clusters - informer util.FederatedInformer // For triggering reconciliation of all target resources. This is // used when a new cluster becomes available. clusterQueue workqueue.DelayingInterface @@ -85,127 +68,180 @@ type StatusAggregator struct { clusterUnavailableDelay time.Duration objectEnqueueDelay time.Duration - worker worker.ReconcileWorker - typeConfig *fedcorev1a1.FederatedTypeConfig + worker worker.ReconcileWorker[reconcileKey] eventRecorder record.EventRecorder - // plugin for source type to aggregate statuses - plugin plugins.Plugin metrics stats.Metrics logger klog.Logger + + dynamicClient dynamicclient.Interface + fedClient fedclient.Interface + + federatedInformer informermanager.FederatedInformerManager + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer + informerManager informermanager.InformerManager } -func StartStatusAggregator(controllerConfig *util.ControllerConfig, - stopChan <-chan struct{}, typeConfig *fedcorev1a1.FederatedTypeConfig, -) error { - aggregator, err := newStatusAggregator(controllerConfig, typeConfig) - if err != nil { - return err +type reconcileKey struct { + gvk schema.GroupVersionKind + namespace string + name string +} + +func (r reconcileKey) NamespacedName() string { + if r.namespace == "" { + return r.name } - klog.V(4).Infof("Starting status aggregator for %q", typeConfig.GetSourceType().Kind) - aggregator.Run(stopChan) - return nil + return fmt.Sprintf("%s/%s", r.namespace, r.name) +} + +func (r reconcileKey) String() string { + return fmt.Sprintf(`{"gvk": %q, "namespace": %q, "name": %q}`, r.gvk.String(), r.namespace, r.name) } -func newStatusAggregator(controllerConfig *util.ControllerConfig, - typeConfig *fedcorev1a1.FederatedTypeConfig, +func NewStatusAggregatorController( + kubeClient kubeclient.Interface, + dynamicClient dynamicclient.Interface, + fedClient fedclient.Interface, + fedObjectInformer fedcorev1a1informers.FederatedObjectInformer, + clusterFedObjectInformer fedcorev1a1informers.ClusterFederatedObjectInformer, + federatedInformer informermanager.FederatedInformerManager, + informerManager informermanager.InformerManager, + metrics stats.Metrics, + logger klog.Logger, + workerCount int, + clusterAvailableDelay, clusterUnavailableDelay time.Duration, ) (*StatusAggregator, error) { - federatedAPIResource := typeConfig.GetFederatedType() - targetAPIResource := typeConfig.GetTargetType() - sourceAPIResource := typeConfig.GetSourceType() - if sourceAPIResource == nil { - return nil, errors.Errorf("Object federation is not supported for %q", federatedAPIResource.Kind) - } - plugin := plugins.GetPlugin(sourceAPIResource) - if plugin == nil { - return nil, errors.Errorf("statuses aggregation plugin is not found for %q", sourceAPIResource.Kind) - } + a := &StatusAggregator{ + name: 
StatusAggregatorControllerName, - userAgent := fmt.Sprintf("%s-status-aggregator", strings.ToLower(sourceAPIResource.Kind)) - configWithUserAgent := rest.CopyConfig(controllerConfig.KubeConfig) - rest.AddUserAgent(configWithUserAgent, userAgent) + // Build queue for triggering cluster reconciliations. + clusterQueue: workqueue.NewNamedDelayingQueue(StatusAggregatorControllerName), + clusterAvailableDelay: clusterAvailableDelay, + clusterUnavailableDelay: clusterUnavailableDelay, + objectEnqueueDelay: 10 * time.Second, - kubeClient := kubeclient.NewForConfigOrDie(configWithUserAgent) - recorder := eventsink.NewDefederatingRecorderMux(kubeClient, userAgent, 4) + eventRecorder: eventsink.NewDefederatingRecorderMux(kubeClient, StatusAggregatorControllerName, 4), + metrics: metrics, + logger: logger.WithValues("controller", StatusAggregatorControllerName), - a := &StatusAggregator{ - name: userAgent, - eventRecorder: recorder, - typeConfig: typeConfig, - plugin: plugin, - metrics: controllerConfig.Metrics, - logger: klog.LoggerWithValues(klog.Background(), "controller", ControllerName, "ftc", typeConfig.Name), - } - var err error - a.federatedClient, err = util.NewResourceClient(configWithUserAgent, &federatedAPIResource) - if err != nil { - return nil, err - } - a.sourceClient, err = util.NewResourceClient(configWithUserAgent, sourceAPIResource) - if err != nil { - return nil, err - } + dynamicClient: dynamicClient, + fedClient: fedClient, - // Build queue for triggering cluster reconciliations. - a.clusterQueue = workqueue.NewNamedDelayingQueue("status-aggregator-cluster-queue") - a.clusterAvailableDelay = controllerConfig.ClusterAvailableDelay - a.clusterUnavailableDelay = controllerConfig.ClusterUnavailableDelay - a.objectEnqueueDelay = 10 * time.Second + fedObjectInformer: fedObjectInformer, + clusterFedObjectInformer: clusterFedObjectInformer, + federatedInformer: federatedInformer, + informerManager: informerManager, + } - a.worker = worker.NewReconcileWorker( + a.worker = worker.NewReconcileWorker[reconcileKey]( + StatusAggregatorControllerName, a.reconcile, worker.RateLimiterOptions{}, - controllerConfig.WorkerCount, - controllerConfig.Metrics, - delayingdeliver.NewMetricTags("statusaggregator-worker", typeConfig.GetTargetType().Kind), - ) - enqueueObj := a.worker.EnqueueObject - targetNamespace := controllerConfig.TargetNamespace - a.federatedStore, a.federatedController = util.NewResourceInformer( - a.federatedClient, - targetNamespace, - enqueueObj, - controllerConfig.Metrics, + workerCount, + metrics, ) - a.sourceStore, a.sourceController = util.NewResourceInformer( - a.sourceClient, - targetNamespace, - enqueueObj, - controllerConfig.Metrics, - ) - a.informer, err = util.NewFederatedInformer( - controllerConfig, - genericclient.NewForConfigOrDie(configWithUserAgent), - configWithUserAgent, - &targetAPIResource, - func(obj pkgruntime.Object) { - qualifiedName := common.NewQualifiedName(obj) - a.worker.EnqueueWithDelay(qualifiedName, a.objectEnqueueDelay) - }, - &util.ClusterLifecycleHandlerFuncs{ - ClusterAvailable: func(cluster *fedcorev1a1.FederatedCluster) { + + genericFederatedObjectHandler := eventhandlers.NewTriggerOnAllChanges(func(fedObj fedcorev1a1.GenericFederatedObject) { + if !fedObj.GetDeletionTimestamp().IsZero() { + return + } + unsObj, err := fedObj.GetSpec().GetTemplateAsUnstructured() + if err != nil { + a.logger.Error(err, "Failed to get object metadata") + return + } + gvk := unsObj.GroupVersionKind() + if a.getFTCIfStatusAggregationIsEnabled(gvk) == nil { + 
a.logger.V(3).Info("Status aggregation is disabled", "gvk", gvk) + return + } + + a.worker.Enqueue(reconcileKey{ + gvk: gvk, + namespace: unsObj.GetNamespace(), + name: unsObj.GetName(), + }) + }) + if _, err := a.fedObjectInformer.Informer().AddEventHandler(genericFederatedObjectHandler); err != nil { + return nil, fmt.Errorf("failed to create federated informer: %w", err) + } + if _, err := a.clusterFedObjectInformer.Informer().AddEventHandler(genericFederatedObjectHandler); err != nil { + return nil, fmt.Errorf("failed to create cluster federated informer: %w", err) + } + + if err := a.federatedInformer.AddClusterEventHandlers(&informermanager.ClusterEventHandler{ + Predicate: func(oldCluster, newCluster *fedcorev1a1.FederatedCluster) bool { + if newCluster != nil && clusterutil.IsClusterReady(&newCluster.Status) && + (oldCluster == nil || !clusterutil.IsClusterReady(&oldCluster.Status)) { // When new cluster becomes available process all the target resources again. a.clusterQueue.AddAfter(struct{}{}, a.clusterAvailableDelay) - }, - // When a cluster becomes unavailable process all the target resources again. - ClusterUnavailable: func(cluster *fedcorev1a1.FederatedCluster, _ []interface{}) { + } + if oldCluster != nil && !oldCluster.DeletionTimestamp.IsZero() { + // When a cluster becomes unavailable process all the target resources again. a.clusterQueue.AddAfter(struct{}{}, a.clusterUnavailableDelay) - }, + } + return false }, - ) - if err != nil { - return nil, err + Callback: func(_ *fedcorev1a1.FederatedCluster) {}, + }); err != nil { + return nil, fmt.Errorf("failed to add event handler generator for cluster: %w", err) + } + + if err := a.federatedInformer.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{ + Predicate: informermanager.RegisterOncePredicate, + Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + return eventhandlers.NewTriggerOnAllChanges(func(obj *unstructured.Unstructured) { + if !obj.GetDeletionTimestamp().IsZero() { + return + } + gvk := obj.GroupVersionKind() + if a.getFTCIfStatusAggregationIsEnabled(gvk) == nil { + a.logger.V(3).Info("Status aggregation is disabled", "gvk", gvk) + return + } + a.worker.EnqueueWithDelay(reconcileKey{ + gvk: gvk, + namespace: obj.GetNamespace(), + name: obj.GetName(), + }, a.objectEnqueueDelay) + }) + }, + }); err != nil { + return nil, fmt.Errorf("failed to add event handler generator for cluster obj: %w", err) + } + + if err := a.informerManager.AddEventHandlerGenerator(&informermanager.EventHandlerGenerator{ + Predicate: informermanager.RegisterOncePredicate, + Generator: func(ftc *fedcorev1a1.FederatedTypeConfig) cache.ResourceEventHandler { + return eventhandlers.NewTriggerOnAllChanges(func(obj *unstructured.Unstructured) { + if !obj.GetDeletionTimestamp().IsZero() || !ftc.GetStatusAggregationEnabled() { + return + } + a.worker.Enqueue(reconcileKey{ + gvk: ftc.GetSourceTypeGVK(), + namespace: obj.GetNamespace(), + name: obj.GetName(), + }) + }) + }, + }); err != nil { + return nil, fmt.Errorf("failed to add event handler generator for obj: %w", err) } return a, nil } -func (a *StatusAggregator) Run(stopChan <-chan struct{}) { - a.logger.Info("Starting controller") - defer a.logger.Info("Stopping controller") +func (a *StatusAggregator) IsControllerReady() bool { + return a.HasSynced() +} + +func (a *StatusAggregator) Run(ctx context.Context) { + ctx, logger := logging.InjectLogger(ctx, a.logger) + + logger.Info("Starting controller") + defer logger.Info("Stopping controller") - 
go a.sourceController.Run(stopChan) - go a.federatedController.Run(stopChan) - a.informer.Start() go func() { for { item, shutdown := a.clusterQueue.Get() @@ -216,46 +252,38 @@ func (a *StatusAggregator) Run(stopChan <-chan struct{}) { a.clusterQueue.Done(item) } }() - if !cache.WaitForNamedCacheSync(a.name, stopChan, a.HasSynced) { + if !cache.WaitForNamedCacheSync(StatusAggregatorControllerName, ctx.Done(), a.HasSynced) { + logger.Error(nil, "Timed out waiting for cache sync") return } - a.worker.Run(stopChan) + logger.Info("Caches are synced") - // Ensure all goroutines are cleaned up when the stop channel closes - go func() { - defer func() { - if r := recover(); r != nil { - a.logger.Error(fmt.Errorf("%v", r), "recovered from panic") - } - }() - <-stopChan - a.informer.Stop() - a.clusterQueue.ShutDown() - }() + a.worker.Run(ctx) + <-ctx.Done() + a.clusterQueue.ShutDown() } func (a *StatusAggregator) HasSynced() bool { - if !a.informer.ClustersSynced() { - a.logger.V(3).Info("Cluster list not synced") - return false - } - if !a.federatedController.HasSynced() { - a.logger.V(3).Info("Federated type not synced") - return false - } - if !a.sourceController.HasSynced() { - a.logger.V(3).Info("Status not synced") - return false - } - - return true + return a.fedObjectInformer.Informer().HasSynced() && + a.clusterFedObjectInformer.Informer().HasSynced() && + a.federatedInformer.HasSynced() && + a.informerManager.HasSynced() } -func (a *StatusAggregator) reconcile(qualifiedName common.QualifiedName) (status worker.Result) { - sourceKind := a.typeConfig.GetSourceType().Kind - key := qualifiedName.String() - logger := a.logger.WithValues("object", key) - ctx := klog.NewContext(context.TODO(), logger) +func (a *StatusAggregator) reconcile(ctx context.Context, key reconcileKey) (status worker.Result) { + logger := a.logger.WithValues("object", key.NamespacedName(), "gvk", key.gvk) + ctx = klog.NewContext(ctx, logger) + + ftc := a.getFTCIfStatusAggregationIsEnabled(key.gvk) + if ftc == nil { + logger.V(3).Info("StatusAggregation not enabled") + return worker.StatusAllOK + } + plugin := plugins.GetPlugin(key.gvk) + if plugin == nil { + logger.V(3).Info("Plugin not found") + return worker.StatusAllOK + } a.metrics.Rate("status-aggregator.throughput", 1) logger.V(3).Info("Starting to reconcile") @@ -266,9 +294,9 @@ func (a *StatusAggregator) reconcile(qualifiedName common.QualifiedName) (status Info("Finished reconciling") }() - sourceObject, err := objectFromCache(a.sourceStore, key) + sourceObject, err := a.getObjectFromStore(key, "") if err != nil { - logger.Error(err, "Failed to get object from cache") + logger.Error(err, "Failed to get source object from cache") return worker.StatusError } @@ -277,9 +305,18 @@ func (a *StatusAggregator) reconcile(qualifiedName common.QualifiedName) (status return worker.StatusAllOK } - fedObject, err := objectFromCache(a.federatedStore, key) + federatedName := naming.GenerateFederatedObjectName(key.name, ftc.Name) + logger = logger.WithValues("federated-object", federatedName) + ctx = klog.NewContext(ctx, logger) + + fedObject, err := fedobjectadapters.GetFromLister( + a.fedObjectInformer.Lister(), + a.clusterFedObjectInformer.Lister(), + key.namespace, + federatedName, + ) if err != nil { - logger.Error(err, "Failed to get object from cache") + logger.Error(err, "Failed to get object from store") return worker.StatusError } @@ -288,7 +325,7 @@ func (a *StatusAggregator) reconcile(qualifiedName common.QualifiedName) (status return worker.StatusAllOK } - 
clusterObjs, err := a.clusterObjs(ctx, qualifiedName) + clusterObjs, err := a.clusterObjs(ctx, key) if err != nil { logger.Error(err, "Failed to get cluster objs") return worker.StatusError @@ -300,85 +337,48 @@ func (a *StatusAggregator) reconcile(qualifiedName common.QualifiedName) (status return worker.StatusError } - newObj, needUpdate, err := a.plugin.AggregateStatuses(ctx, sourceObject, fedObject, clusterObjs, clusterObjsUpToDate) + newObj, needUpdate, err := plugin.AggregateStatuses(ctx, sourceObject, fedObject, clusterObjs, clusterObjsUpToDate) if err != nil { return worker.StatusError } - canReuseUpdateStatus := sourceObject.GroupVersionKind() == appsv1.SchemeGroupVersion.WithKind(common.DeploymentKind) - - if canReuseUpdateStatus { - sourcefeedback.PopulateStatusAnnotation(newObj, clusterObjs, &needUpdate) - } - if needUpdate { logger.V(1).Info("Updating status of source object") - _, err = a.sourceClient.Resources(qualifiedName.Namespace). - UpdateStatus(context.TODO(), newObj, metav1.UpdateOptions{}) + _, err = a.dynamicClient.Resource(ftc.GetSourceTypeGVR()).Namespace(key.namespace). + UpdateStatus(ctx, newObj, metav1.UpdateOptions{}) if err != nil { if apierrors.IsConflict(err) { return worker.StatusConflict } a.eventRecorder.Eventf(sourceObject, corev1.EventTypeWarning, EventReasonUpdateSourceObjectStatus, - "failed to update source object status %v %v, err: %v, retry later", sourceKind, key, err) + "failed to update source object status %v, err: %v, retry later", key, err) return worker.StatusError } a.eventRecorder.Eventf(sourceObject, corev1.EventTypeNormal, EventReasonUpdateSourceObjectStatus, - "updated source object status %v %v", sourceKind, key) - } - - if !canReuseUpdateStatus { - needUpdate = false - sourcefeedback.PopulateStatusAnnotation(newObj, clusterObjs, &needUpdate) - - if needUpdate { - logger.V(1).Info("Updating annotation of source object") - _, err = a.sourceClient.Resources(qualifiedName.Namespace). - Update(context.TODO(), newObj, metav1.UpdateOptions{}) - if err != nil { - if apierrors.IsConflict(err) { - return worker.StatusConflict - } - a.eventRecorder.Eventf(sourceObject, corev1.EventTypeWarning, EventReasonUpdateSourceObjectAnnotation, - "failed to update source object annotation %v %v, err: %v, retry later", sourceKind, key, err) - return worker.StatusError - } - a.eventRecorder.Eventf(sourceObject, corev1.EventTypeNormal, EventReasonUpdateSourceObjectAnnotation, - "updated source object annotation %v %v", sourceKind, key) - } + "updated source object status %v", key) } return worker.StatusAllOK } // clusterStatuses returns the resource status in member cluster. 
-func (a *StatusAggregator) clusterObjs(ctx context.Context, qualifiedName common.QualifiedName) (map[string]interface{}, error) { +func (a *StatusAggregator) clusterObjs(ctx context.Context, key reconcileKey) (map[string]interface{}, error) { logger := klog.FromContext(ctx) - key := qualifiedName.String() - clusters, err := a.informer.GetReadyClusters() + clusters, err := a.federatedInformer.GetReadyClusters() if err != nil { + logger.Error(err, "Failed to list clusters") return nil, err } objs := make(map[string]interface{}) - targetKind := a.typeConfig.GetTargetType().Kind for _, cluster := range clusters { - clusterObj, exist, err := util.GetClusterObject( - context.TODO(), - a.informer, - cluster.Name, - qualifiedName, - a.typeConfig.GetTargetType(), - ) + clusterObj, err := a.getObjectFromStore(key, cluster.Name) + if apierrors.IsNotFound(err) { + // the target object has not been propagated to this cluster yet + continue + } if err != nil { - wrappedErr := errors.Wrapf(err, "Failed to get %s %q from cluster %q", targetKind, key, cluster.Name) - logger.WithValues("cluster-name", cluster.Name).Error(err, "Failed to get object from cluster") - return nil, wrappedErr - } - if exist { - objs[cluster.Name] = clusterObj + logger.Error(err, "Failed to get object from cluster", "cluster", cluster.Name) + return nil, fmt.Errorf("failed to get object from cluster: %w", err) } + objs[cluster.Name] = clusterObj } return objs, nil @@ -386,19 +386,96 @@ func (a *StatusAggregator) clusterObjs(ctx context.Context, qualifiedName common // The function triggers reconciliation of all target federated resources. func (a *StatusAggregator) reconcileOnClusterChange() { - for _, obj := range a.sourceStore.List() { - qualifiedName := common.NewQualifiedName(obj.(pkgruntime.Object)) - a.worker.EnqueueWithDelay(qualifiedName, time.Second*3) + ftcs := a.listFTCWithStatusAggregationEnabled() + for _, ftc := range ftcs { + gvk := ftc.GetSourceTypeGVK() + logger := a.logger.WithValues("gvk", gvk) + lister, hasSynced, exists := a.informerManager.GetResourceLister(gvk) + if !exists { + logger.Error(nil, "Lister does not exist") + return + } + if !hasSynced() { + logger.V(3).Info("Lister not synced, will retry later") + a.clusterQueue.AddAfter(struct{}{}, a.clusterAvailableDelay) + return + } + sources, err := lister.List(labels.Everything()) + if err != nil { + logger.Error(err, "Failed to list source objects, will retry later") + a.clusterQueue.AddAfter(struct{}{}, a.clusterAvailableDelay) + return + } + for _, item := range sources { + unsObj, ok := item.(*unstructured.Unstructured) + if !ok { + logger.V(3).Info("Expected *unstructured.Unstructured but got a different type", "obj", item) + return + } + a.worker.EnqueueWithDelay(reconcileKey{ + gvk: gvk, + namespace: unsObj.GetNamespace(), + name: unsObj.GetName(), + }, a.objectEnqueueDelay) + } + } } -func objectFromCache(store cache.Store, key string) (*unstructured.Unstructured, error) { - cachedObj, exist, err := store.GetByKey(key) +// getObjectFromStore returns the specified object from the given member cluster's informer store. +// If cluster is empty, the object is retrieved from the host cluster's informerManager instead. 
+func (a *StatusAggregator) getObjectFromStore(qualifiedName reconcileKey, cluster string) (*unstructured.Unstructured, error) {
+	var (
+		lister    cache.GenericLister
+		hasSynced cache.InformerSynced
+		exists    bool
+	)
+	if cluster != "" {
+		lister, hasSynced, exists = a.federatedInformer.GetResourceLister(qualifiedName.gvk, cluster)
+	} else {
+		lister, hasSynced, exists = a.informerManager.GetResourceLister(qualifiedName.gvk)
+	}
+	if !exists {
+		return nil, fmt.Errorf("lister for %s does not exist", qualifiedName.gvk)
+	}
+	if !hasSynced() {
+		return nil, fmt.Errorf("lister for %s not synced", qualifiedName.gvk)
+	}
+
+	var obj pkgruntime.Object
+	var err error
+	if qualifiedName.namespace == "" {
+		obj, err = lister.Get(qualifiedName.name)
+	} else {
+		obj, err = lister.ByNamespace(qualifiedName.namespace).Get(qualifiedName.name)
+	}
 	if err != nil {
 		return nil, err
 	}
-	if !exist {
-		return nil, nil
+
+	return obj.(*unstructured.Unstructured), nil
+}
+
+func (a *StatusAggregator) getFTCIfStatusAggregationIsEnabled(gvk schema.GroupVersionKind) *fedcorev1a1.FederatedTypeConfig {
+	typeConfig, exists := a.informerManager.GetResourceFTC(gvk)
+	if !exists || typeConfig == nil || !typeConfig.GetStatusAggregationEnabled() {
+		return nil
+	}
+
+	return typeConfig.DeepCopy()
+}
+
+func (a *StatusAggregator) listFTCWithStatusAggregationEnabled() []*fedcorev1a1.FederatedTypeConfig {
+	ftcLister := a.federatedInformer.GetFederatedTypeConfigLister()
+	ftcList, err := ftcLister.List(labels.Everything())
+	if err != nil {
+		return nil
+	}
+
+	res := make([]*fedcorev1a1.FederatedTypeConfig, 0, len(ftcList))
+	for _, ftc := range ftcList {
+		if ftc.GetStatusAggregationEnabled() {
+			res = append(res, ftc.DeepCopy())
+		}
	}
-	return cachedObj.(*unstructured.Unstructured).DeepCopy(), nil
+	return res
 }
diff --git a/pkg/controllers/statusaggregator/plugins/deployment.go b/pkg/controllers/statusaggregator/plugins/deployment.go
index c30371eb..62c25e32 100644
--- a/pkg/controllers/statusaggregator/plugins/deployment.go
+++ b/pkg/controllers/statusaggregator/plugins/deployment.go
@@ -1,4 +1,3 @@
-//go:build exclude
 /*
 Copyright 2023 The KubeAdmiral Authors.
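With these changes in place, status aggregation reduces to a GVK-keyed dispatch: the FTC gates whether aggregation runs at all, and the plugin is selected by the source GVK. The sketch below is illustrative only and not part of the patch; the helper name aggregateOnce is hypothetical, while GetSourceTypeGVK, GetPlugin, and AggregateStatuses are the signatures introduced elsewhere in this series.

package example

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
	"github.com/kubewharf/kubeadmiral/pkg/controllers/statusaggregator/plugins"
)

// aggregateOnce runs a single aggregation pass for one source object.
func aggregateOnce(
	ctx context.Context,
	ftc *fedcorev1a1.FederatedTypeConfig,
	sourceObject *unstructured.Unstructured,
	fedObject fedcorev1a1.GenericFederatedObject,
	clusterObjs map[string]interface{},
	clusterObjsUpToDate bool,
) (*unstructured.Unstructured, bool, error) {
	gvk := ftc.GetSourceTypeGVK()
	// Plugins are now looked up by GVK instead of *metav1.APIResource.
	plugin := plugins.GetPlugin(gvk)
	if plugin == nil {
		return nil, false, fmt.Errorf("no status aggregation plugin registered for %s", gvk)
	}
	return plugin.AggregateStatuses(ctx, sourceObject, fedObject, clusterObjs, clusterObjsUpToDate)
}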
@@ -26,12 +25,13 @@ import ( appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/util" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util/annotation" + "github.com/kubewharf/kubeadmiral/pkg/util/annotation" ) type DeploymentPlugin struct{} @@ -42,7 +42,8 @@ func NewDeploymentPlugin() *DeploymentPlugin { func (receiver *DeploymentPlugin) AggregateStatuses( ctx context.Context, - sourceObject, fedObject *unstructured.Unstructured, + sourceObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, clusterObjs map[string]interface{}, clusterObjsUpToDate bool, ) (*unstructured.Unstructured, bool, error) { @@ -52,28 +53,15 @@ func (receiver *DeploymentPlugin) AggregateStatuses( digests := []util.LatestReplicasetDigest{} sourceDeployment := &appsv1.Deployment{} - if err := util.ConvertViaJson(sourceObject, sourceDeployment); err != nil { + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(sourceObject.Object, sourceDeployment); err != nil { return nil, false, err } aggregatedStatus := &appsv1.DeploymentStatus{} - resource := &fedtypesv1a1.GenericObjectWithStatus{} - err := util.UnstructuredToInterface(fedObject, resource) - if err != nil { - return nil, false, fmt.Errorf("failed to unmarshall to generic resource: %w", err) - } - - if !clusterObjsUpToDate { - logger.V(3).Info("Cluster objects are not up to date") - needUpdateObservedGeneration = false - } - clusterSyncedGenerations := make(map[string]int64) - if resource.Status != nil { - for _, cluster := range resource.Status.Clusters { - clusterSyncedGenerations[cluster.Name] = cluster.Generation - } + for _, cluster := range fedObject.GetStatus().Clusters { + clusterSyncedGenerations[cluster.Cluster] = cluster.LastObservedGeneration } for clusterName, clusterObj := range clusterObjs { @@ -93,7 +81,7 @@ func (receiver *DeploymentPlugin) AggregateStatuses( } deployStatus := &appsv1.DeploymentStatus{} - if err = util.ConvertViaJson(status, deployStatus); err != nil { + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(status, deployStatus); err != nil { return nil, false, err } @@ -135,7 +123,7 @@ func (receiver *DeploymentPlugin) AggregateStatuses( aggregatedStatus.ObservedGeneration = sourceDeployment.Status.ObservedGeneration } - newStatus, err := util.GetUnstructuredStatus(aggregatedStatus) + newStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(aggregatedStatus) if err != nil { return nil, false, err } @@ -165,7 +153,7 @@ func (receiver *DeploymentPlugin) AggregateStatuses( rsDigestsAnnotation := string(rsDigestsAnnotationBytes) hasRSDigestsAnnotation, err := annotation.HasAnnotationKeyValue( sourceObject, - util.LatestReplicasetDigestsAnnotation, + common.LatestReplicasetDigestsAnnotation, rsDigestsAnnotation, ) if err != nil { @@ -178,7 +166,7 @@ func (receiver *DeploymentPlugin) AggregateStatuses( needUpdate = true } - _, err = annotation.AddAnnotation(sourceObject, util.LatestReplicasetDigestsAnnotation, rsDigestsAnnotation) + _, err = annotation.AddAnnotation(sourceObject, common.LatestReplicasetDigestsAnnotation, rsDigestsAnnotation) if err != nil { return nil, false, err } diff --git a/pkg/controllers/statusaggregator/plugins/job.go 
b/pkg/controllers/statusaggregator/plugins/job.go index aad89156..0a1717a4 100644 --- a/pkg/controllers/statusaggregator/plugins/job.go +++ b/pkg/controllers/statusaggregator/plugins/job.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -43,7 +43,8 @@ func NewJobPlugin() *JobPlugin { func (receiver *JobPlugin) AggregateStatuses( ctx context.Context, - sourceObject, fedObject *unstructured.Unstructured, + sourceObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, clusterObjs map[string]interface{}, clusterObjsUpToDate bool, ) (*unstructured.Unstructured, bool, error) { diff --git a/pkg/controllers/statusaggregator/plugins/job_test.go b/pkg/controllers/statusaggregator/plugins/job_test.go index bd432f45..2bc6b8eb 100644 --- a/pkg/controllers/statusaggregator/plugins/job_test.go +++ b/pkg/controllers/statusaggregator/plugins/job_test.go @@ -29,6 +29,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) func TestJobPlugin(t *testing.T) { @@ -97,7 +99,7 @@ func TestJobPlugin(t *testing.T) { name string sourceObject *unstructured.Unstructured - fedObject *unstructured.Unstructured + fedObject fedcorev1a1.GenericFederatedObject clusterObjs map[string]interface{} expectedErr error @@ -107,7 +109,7 @@ func TestJobPlugin(t *testing.T) { { name: "2 completed jobs, need update", sourceObject: uCompletedJob.DeepCopy(), - fedObject: &unstructured.Unstructured{}, + fedObject: nil, clusterObjs: map[string]interface{}{"c1": uCompletedJob.DeepCopy(), "c2": uCompletedJob.DeepCopy()}, expectedErr: nil, expectedNeedUpdate: true, @@ -128,7 +130,7 @@ func TestJobPlugin(t *testing.T) { { name: "1 completed job, 1 failed job, need update", sourceObject: uCompletedJob.DeepCopy(), - fedObject: &unstructured.Unstructured{}, + fedObject: nil, clusterObjs: map[string]interface{}{"c1": uCompletedJob.DeepCopy(), "c2": uFailedJob.DeepCopy()}, expectedErr: nil, expectedNeedUpdate: true, @@ -148,7 +150,7 @@ func TestJobPlugin(t *testing.T) { { name: "2 failed jobs, need update", sourceObject: uCompletedJob.DeepCopy(), - fedObject: &unstructured.Unstructured{}, + fedObject: nil, clusterObjs: map[string]interface{}{"c1": uFailedJob.DeepCopy(), "c2": uFailedJob.DeepCopy()}, expectedErr: nil, expectedNeedUpdate: true, @@ -168,7 +170,7 @@ func TestJobPlugin(t *testing.T) { { name: "1 completed job, 1 suspended job, need update", sourceObject: uCompletedJob.DeepCopy(), - fedObject: &unstructured.Unstructured{}, + fedObject: nil, clusterObjs: map[string]interface{}{"c1": uCompletedJob.DeepCopy(), "c2": uSuspendedJob.DeepCopy()}, expectedErr: nil, expectedNeedUpdate: true, diff --git a/pkg/controllers/statusaggregator/plugins/plugin.go b/pkg/controllers/statusaggregator/plugins/plugin.go index 13a722b6..587addaa 100644 --- a/pkg/controllers/statusaggregator/plugins/plugin.go +++ b/pkg/controllers/statusaggregator/plugins/plugin.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
@@ -23,10 +22,10 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -34,7 +33,8 @@ type Plugin interface { // AggregateStatuses aggregates member cluster object statues to source object status and returns the latter AggregateStatuses( ctx context.Context, - sourceObject, fedObject *unstructured.Unstructured, + sourceObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, clusterObjs map[string]interface{}, clusterObjsUpToDate bool, ) (*unstructured.Unstructured, bool, error) @@ -47,10 +47,6 @@ var pluginsMap = map[schema.GroupVersionKind]Plugin{ corev1.SchemeGroupVersion.WithKind(common.PodKind): NewPodPlugin(), } -func GetPlugin(apiResource *metav1.APIResource) Plugin { - return pluginsMap[schema.GroupVersionKind{ - Group: apiResource.Group, - Version: apiResource.Version, - Kind: apiResource.Kind, - }] +func GetPlugin(gvk schema.GroupVersionKind) Plugin { + return pluginsMap[gvk] } diff --git a/pkg/controllers/statusaggregator/plugins/pod.go b/pkg/controllers/statusaggregator/plugins/pod.go index da045563..9ba9320d 100644 --- a/pkg/controllers/statusaggregator/plugins/pod.go +++ b/pkg/controllers/statusaggregator/plugins/pod.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -30,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -41,7 +41,8 @@ func NewPodPlugin() *PodPlugin { func (receiver *PodPlugin) AggregateStatuses( ctx context.Context, - sourceObject, fedObject *unstructured.Unstructured, + sourceObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, clusterObjs map[string]interface{}, clusterObjsUpToDate bool, ) (*unstructured.Unstructured, bool, error) { diff --git a/pkg/controllers/statusaggregator/plugins/pod_test.go b/pkg/controllers/statusaggregator/plugins/pod_test.go index c313c627..d8789a87 100644 --- a/pkg/controllers/statusaggregator/plugins/pod_test.go +++ b/pkg/controllers/statusaggregator/plugins/pod_test.go @@ -28,6 +28,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" + + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" ) func TestPodPlugin(t *testing.T) { @@ -111,7 +113,7 @@ func TestPodPlugin(t *testing.T) { name string sourceObject *unstructured.Unstructured - fedObject *unstructured.Unstructured + fedObject fedcorev1a1.GenericFederatedObject clusterObjs map[string]interface{} expectedErr error diff --git a/pkg/controllers/statusaggregator/plugins/single_cluster_plugin.go b/pkg/controllers/statusaggregator/plugins/single_cluster_plugin.go index 60ab9c67..3951d411 100644 --- a/pkg/controllers/statusaggregator/plugins/single_cluster_plugin.go +++ b/pkg/controllers/statusaggregator/plugins/single_cluster_plugin.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. 
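Because the aggregator depends only on this interface, supporting a new kind means implementing AggregateStatuses and registering the implementation in pluginsMap under its GVK. A minimal no-op implementation follows for illustration; the type is hypothetical and not part of the patch:

package plugins

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
)

// noopPlugin satisfies the updated Plugin interface but performs no
// aggregation: it returns the source object unchanged and reports that no
// status update is needed.
type noopPlugin struct{}

func (p *noopPlugin) AggregateStatuses(
	ctx context.Context,
	sourceObject *unstructured.Unstructured,
	fedObject fedcorev1a1.GenericFederatedObject,
	clusterObjs map[string]interface{},
	clusterObjsUpToDate bool,
) (*unstructured.Unstructured, bool, error) {
	return sourceObject, false, nil
}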
@@ -25,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/klog/v2" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" ) @@ -37,7 +37,8 @@ type SingleClusterPlugin struct{} func (receiver *SingleClusterPlugin) AggregateStatuses( ctx context.Context, - sourceObject, fedObject *unstructured.Unstructured, + sourceObject *unstructured.Unstructured, + fedObject fedcorev1a1.GenericFederatedObject, clusterObjs map[string]interface{}, clusterObjsUpToDate bool, ) (*unstructured.Unstructured, bool, error) { diff --git a/pkg/controllers/util/propagationstatus/propagationstatus.go b/pkg/controllers/util/propagationstatus/propagationstatus.go index acb0b016..f6ab47f0 100644 --- a/pkg/controllers/util/propagationstatus/propagationstatus.go +++ b/pkg/controllers/util/propagationstatus/propagationstatus.go @@ -1,4 +1,3 @@ -//go:build exclude /* Copyright 2023 The KubeAdmiral Authors. @@ -27,12 +26,11 @@ import ( utiljson "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/sets" - fedtypesv1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/types/v1alpha1" + fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" - "github.com/kubewharf/kubeadmiral/pkg/controllers/util" ) -func IsResourcePropagated(sourceObject, fedObject *unstructured.Unstructured) (bool, error) { +func IsResourcePropagated(sourceObject *unstructured.Unstructured, fedObject fedcorev1a1.GenericFederatedObject) (bool, error) { if sourceObject == nil { return false, fmt.Errorf("source object can't be nil") } @@ -51,19 +49,9 @@ func IsResourcePropagated(sourceObject, fedObject *unstructured.Unstructured) (b return synced, err } - resource := &fedtypesv1a1.GenericObjectWithStatus{} - err = util.UnstructuredToInterface(fedObject, resource) - if err != nil { - return false, fmt.Errorf("failed to unmarshall to generic resource: %w", err) - } - - if resource.Status == nil { - return false, nil - } - syncAllOk := true - for _, cluster := range resource.Status.Clusters { - if cluster.Status != fedtypesv1a1.ClusterPropagationOK { + for _, cluster := range fedObject.GetStatus().Clusters { + if cluster.Status != fedcorev1a1.ClusterPropagationOK { syncAllOk = false break } @@ -72,23 +60,18 @@ func IsResourcePropagated(sourceObject, fedObject *unstructured.Unstructured) (b return syncAllOk, nil } -func IsResourceTemplateSyncedToMemberClusters(fedObject *unstructured.Unstructured) (bool, error) { +func IsResourceTemplateSyncedToMemberClusters(fedObject fedcorev1a1.GenericFederatedObject) (bool, error) { if fedObject == nil { return false, nil } - observedGeneration, found, err := unstructured.NestedInt64(fedObject.Object, "status", "syncedGeneration") - if err != nil || !found { - return false, err - } - // check if the latest resource template has been synced to member clusters - return observedGeneration == fedObject.GetGeneration(), nil + return fedObject.GetStatus().SyncedGeneration == fedObject.GetGeneration(), nil } -func IsFederatedTemplateUpToDate(sourceObject, fedObject *unstructured.Unstructured) (bool, error) { - templateMap, exists, err := unstructured.NestedMap(fedObject.Object, "spec", "template") - if err != nil || !exists { +func IsFederatedTemplateUpToDate(sourceObject *unstructured.Unstructured, fedObject fedcorev1a1.GenericFederatedObject) (bool, error) { + templateMap, err := fedObject.GetSpec().GetTemplateAsUnstructured() + if err != nil { return 
false, err
 	}
 
@@ -121,7 +104,7 @@ func IsFederatedTemplateUpToDate(sourceObject, fedObject *unstructured.Unstructu
 		return false, err
 	}
 
-	if reflect.DeepEqual(prunedSourceObj, templateMap) {
+	if reflect.DeepEqual(prunedSourceObj, templateMap.Object) {
 		return true, nil
 	}
 

From 1bfc64af9ec5eae35697e5fefc08d56494996eb1 Mon Sep 17 00:00:00 2001
From: "zhangxinjie.next"
Date: Tue, 25 Jul 2023 14:53:24 +0800
Subject: [PATCH 172/173] feat: add ReschedulePolicy for pp/cpp

---
 ...admiral.io_clusterpropagationpolicies.yaml | 43 ++++++++++++++-
 ...re.kubeadmiral.io_propagationpolicies.yaml | 43 ++++++++++++++-
 .../generic_propagationpolicies.src.sh        |  1 +
 .../core/v1alpha1/types_propagationpolicy.go  | 52 +++++++++++++++++++
 .../core/v1alpha1/zz_generated.deepcopy.go    | 47 +++++++++++++++++
 5 files changed, 182 insertions(+), 4 deletions(-)

diff --git a/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml b/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml
index f3707846..f5bec50c 100644
--- a/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml
+++ b/config/crds/core.kubeadmiral.io_clusterpropagationpolicies.yaml
@@ -160,7 +160,7 @@ spec:
                 type: object
               type: array
             replicaRescheduling:
-              description: Configures behaviors related to replica rescheduling. Default set via a post-generation patch. See patch file for details.
+              description: 'Configures behaviors related to replica rescheduling. Default set via a post-generation patch. See patch file for details. Deprecated: Please use reschedulePolicy.replicaRescheduling instead. This field will be removed in the next release.'
               properties:
                 avoidDisruption:
                   default: true
@@ -168,6 +168,45 @@ spec:
                   type: boolean
               type: object
               default: {}
+            reschedulePolicy:
+              default:
+                rescheduleWhen:
+                  policyContentChanged: true
+              description: Configures behaviors related to rescheduling.
+              properties:
+                disableRescheduling:
+                  description: DisableRescheduling determines if a federated object can be rescheduled.
+                  type: boolean
+                replicaRescheduling:
+                  description: Configures behaviors related to replica rescheduling. Default set via a post-generation patch. See patch file for details.
+                  properties:
+                    avoidDisruption:
+                      default: true
+                      description: If set to true, the scheduler will attempt to prevent migrating existing replicas during rescheduling. In order to do so, replica scheduling preferences might not be fully respected. If set to false, the scheduler will always rebalance the replicas based on the specified preferences, which might cause temporary service disruption.
+                      type: boolean
+                  type: object
+                  default: {}
+                rescheduleWhen:
+                  description: When the related objects should be subject to rescheduling.
+                  properties:
+                    clusterAPIResourcesChanged:
+                      default: false
+                      description: If set to true, changes to clusters' enabled list of api resources will trigger rescheduling. If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the requested resources of the template changed.
+                      type: boolean
+                    clusterJoined:
+                      default: false
+                      description: If set to true, clusters joining the federation will trigger rescheduling. If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the requested resources of the template changed.
+                      type: boolean
+                    clusterLabelsChanged:
+                      default: false
+                      description: If set to true, changes to cluster labels will trigger rescheduling.
If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the requested resources of the template changed.
+                      type: boolean
+                    policyContentChanged:
+                      default: true
+                      description: If set to true, the scheduler will trigger rescheduling when the semantics of the policy changes. For example, modifying placement, schedulingMode, maxClusters, clusterSelector, and other configurations related to scheduling (including reschedulePolicy itself) will immediately trigger rescheduling. Modifying labels, annotations, or the autoMigration configuration will not trigger rescheduling. If set to false, the scheduler will not reschedule when the policy content changes.
+                      type: boolean
+                  type: object
+              type: object
             schedulingMode:
               description: SchedulingMode determines the mode used for scheduling.
               enum:
@@ -178,7 +217,7 @@ spec:
               description: Profile determines the scheduling profile to be used for scheduling
               type: string
             stickyCluster:
-              description: StickyCluster determines if a federated object can be rescheduled.
+              description: 'StickyCluster determines if a federated object can be rescheduled. Deprecated: Please use reschedulePolicy.disableRescheduling instead. This field will be removed in the next release.'
               type: boolean
             tolerations:
               description: Tolerations describe a set of cluster taints that the policy tolerates.
diff --git a/config/crds/core.kubeadmiral.io_propagationpolicies.yaml b/config/crds/core.kubeadmiral.io_propagationpolicies.yaml
index 424b87d8..d963908d 100644
--- a/config/crds/core.kubeadmiral.io_propagationpolicies.yaml
+++ b/config/crds/core.kubeadmiral.io_propagationpolicies.yaml
@@ -160,7 +160,7 @@ spec:
                 type: object
               type: array
             replicaRescheduling:
-              description: Configures behaviors related to replica rescheduling. Default set via a post-generation patch. See patch file for details.
+              description: 'Configures behaviors related to replica rescheduling. Default set via a post-generation patch. See patch file for details. Deprecated: Please use reschedulePolicy.replicaRescheduling instead. This field will be removed in the next release.'
               properties:
                 avoidDisruption:
                   default: true
@@ -168,6 +168,45 @@ spec:
                   type: boolean
               type: object
               default: {}
+            reschedulePolicy:
+              default:
+                rescheduleWhen:
+                  policyContentChanged: true
+              description: Configures behaviors related to rescheduling.
+              properties:
+                disableRescheduling:
+                  description: DisableRescheduling determines if a federated object can be rescheduled.
+                  type: boolean
+                replicaRescheduling:
+                  description: Configures behaviors related to replica rescheduling. Default set via a post-generation patch. See patch file for details.
+                  properties:
+                    avoidDisruption:
+                      default: true
+                      description: If set to true, the scheduler will attempt to prevent migrating existing replicas during rescheduling. In order to do so, replica scheduling preferences might not be fully respected. If set to false, the scheduler will always rebalance the replicas based on the specified preferences, which might cause temporary service disruption.
+                      type: boolean
+                  type: object
+                  default: {}
+                rescheduleWhen:
+                  description: When the related objects should be subject to rescheduling.
+                  properties:
+                    clusterAPIResourcesChanged:
+                      default: false
+                      description: If set to true, changes to clusters' enabled list of api resources will trigger rescheduling. If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the requested resources of the template changed.
+                      type: boolean
+                    clusterJoined:
+                      default: false
+                      description: If set to true, clusters joining the federation will trigger rescheduling. If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the requested resources of the template changed.
+                      type: boolean
+                    clusterLabelsChanged:
+                      default: false
+                      description: If set to true, changes to cluster labels will trigger rescheduling. If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the requested resources of the template changed.
+                      type: boolean
+                    policyContentChanged:
+                      default: true
+                      description: If set to true, the scheduler will trigger rescheduling when the semantics of the policy changes. For example, modifying placement, schedulingMode, maxClusters, clusterSelector, and other configurations related to scheduling (including reschedulePolicy itself) will immediately trigger rescheduling. Modifying labels, annotations, or the autoMigration configuration will not trigger rescheduling. If set to false, the scheduler will not reschedule when the policy content changes.
+                      type: boolean
+                  type: object
+              type: object
             schedulingMode:
               description: SchedulingMode determines the mode used for scheduling.
               enum:
@@ -178,7 +217,7 @@ spec:
               description: Profile determines the scheduling profile to be used for scheduling
               type: string
             stickyCluster:
-              description: StickyCluster determines if a federated object can be rescheduled.
+              description: 'StickyCluster determines if a federated object can be rescheduled. Deprecated: Please use reschedulePolicy.disableRescheduling instead. This field will be removed in the next release.'
               type: boolean
             tolerations:
               description: Tolerations describe a set of cluster taints that the policy tolerates.
diff --git a/config/crds/patches/generic_propagationpolicies.src.sh b/config/crds/patches/generic_propagationpolicies.src.sh
index 7b3690d0..012d5b30 100644
--- a/config/crds/patches/generic_propagationpolicies.src.sh
+++ b/config/crds/patches/generic_propagationpolicies.src.sh
@@ -1,6 +1,7 @@
 # controller-gen does not respect {} as default value for a struct field
 # issue: https://github.com/kubernetes-sigs/controller-tools/issues/622
 yq eval -i '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.replicaRescheduling.default = {}' "$1"
+yq eval -i '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.reschedulePolicy.properties.replicaRescheduling.default = {}' "$1"
 
 # policies are always referenced from labels, the value of which has limited length
 yq eval -i '.spec.versions[].schema.openAPIV3Schema.properties.metadata |=
diff --git a/pkg/apis/core/v1alpha1/types_propagationpolicy.go b/pkg/apis/core/v1alpha1/types_propagationpolicy.go
index 5773cb86..c2e9addf 100644
--- a/pkg/apis/core/v1alpha1/types_propagationpolicy.go
+++ b/pkg/apis/core/v1alpha1/types_propagationpolicy.go
@@ -68,6 +68,7 @@ type PropagationPolicySpec struct {
 	SchedulingMode SchedulingMode `json:"schedulingMode"`
 	// StickyCluster determines if a federated object can be rescheduled.
 	// +optional
+	// Deprecated: Please use reschedulePolicy.disableRescheduling instead. This field will be removed in the next release.
 	StickyCluster bool `json:"stickyCluster"`
 
 	// ClusterSelector is a label query over clusters to consider for scheduling.
@@ -106,7 +107,13 @@ type PropagationPolicySpec struct {
 	// +optional
 	// Default set via a post-generation patch.
 	// See patch file for details.
+	// Deprecated: Please use reschedulePolicy.replicaRescheduling instead. This field will be removed in the next release.
 	ReplicaRescheduling *ReplicaRescheduling `json:"replicaRescheduling,omitempty"`
+
+	// Configures behaviors related to rescheduling.
+	// +optional
+	// +kubebuilder:default:={rescheduleWhen:{policyContentChanged:true}}
+	ReschedulePolicy *ReschedulePolicy `json:"reschedulePolicy,omitempty"`
 }
 
 type PropagationPolicyStatus struct {
@@ -187,3 +194,48 @@ type ReplicaRescheduling struct {
 	// +kubebuilder:default:=true
 	AvoidDisruption bool `json:"avoidDisruption"`
 }
+
+// ReschedulePolicy describes the rescheduling policy.
+type ReschedulePolicy struct {
+	// DisableRescheduling determines if a federated object can be rescheduled.
+	// +optional
+	DisableRescheduling bool `json:"disableRescheduling,omitempty"`
+	// When the related objects should be subject to rescheduling.
+	// +optional
+	Trigger *RescheduleTrigger `json:"rescheduleWhen,omitempty"`
+	// Configures behaviors related to replica rescheduling.
+	// +optional
+	// Default set via a post-generation patch.
+	// See patch file for details.
+	ReplicaRescheduling *ReplicaRescheduling `json:"replicaRescheduling,omitempty"`
+}
+
+// RescheduleTrigger configures the criteria for triggering rescheduling.
+type RescheduleTrigger struct {
+	// If set to true, the scheduler will trigger rescheduling when the semantics of the policy changes. For example,
+	// modifying placement, schedulingMode, maxClusters, clusterSelector, and other configurations related to
+	// scheduling (including reschedulePolicy itself) will immediately trigger rescheduling. Modifying labels,
+	// annotations, or the autoMigration configuration will not trigger rescheduling.
+	// If set to false, the scheduler will not reschedule when the policy content changes.
+	// +optional
+	// +kubebuilder:default:=true
+	PolicyContentChanged bool `json:"policyContentChanged"`
+	// If set to true, clusters joining the federation will trigger rescheduling.
+	// If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the
+	// requested resources of the template changed.
+	// +optional
+	// +kubebuilder:default:=false
+	ClusterJoined bool `json:"clusterJoined"`
+	// If set to true, changes to cluster labels will trigger rescheduling.
+	// If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the
+	// requested resources of the template changed.
+	// +optional
+	// +kubebuilder:default:=false
+	ClusterLabelsChanged bool `json:"clusterLabelsChanged"`
+	// If set to true, changes to clusters' enabled list of api resources will trigger rescheduling.
+	// If set to false, the scheduler will reschedule only when other options are triggered or the replicas or the
+	// requested resources of the template changed.
+ // +optional + // +kubebuilder:default:=false + ClusterAPIResourcesChanged bool `json:"clusterAPIResourcesChanged"` +} diff --git a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go index d14044b6..9bc9762b 100644 --- a/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -1598,6 +1598,11 @@ func (in *PropagationPolicySpec) DeepCopyInto(out *PropagationPolicySpec) { *out = new(ReplicaRescheduling) **out = **in } + if in.ReschedulePolicy != nil { + in, out := &in.ReschedulePolicy, &out.ReschedulePolicy + *out = new(ReschedulePolicy) + (*in).DeepCopyInto(*out) + } return } @@ -1660,6 +1665,48 @@ func (in *ReplicaRescheduling) DeepCopy() *ReplicaRescheduling { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReschedulePolicy) DeepCopyInto(out *ReschedulePolicy) { + *out = *in + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(RescheduleTrigger) + **out = **in + } + if in.ReplicaRescheduling != nil { + in, out := &in.ReplicaRescheduling, &out.ReplicaRescheduling + *out = new(ReplicaRescheduling) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReschedulePolicy. +func (in *ReschedulePolicy) DeepCopy() *ReschedulePolicy { + if in == nil { + return nil + } + out := new(ReschedulePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RescheduleTrigger) DeepCopyInto(out *RescheduleTrigger) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RescheduleTrigger. +func (in *RescheduleTrigger) DeepCopy() *RescheduleTrigger { + if in == nil { + return nil + } + out := new(RescheduleTrigger) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Resources) DeepCopyInto(out *Resources) { *out = *in From 145cae9a2f21d539806497c2bbc26d68989e21ba Mon Sep 17 00:00:00 2001 From: "zhangxinjie.next" Date: Mon, 31 Jul 2023 11:10:30 +0800 Subject: [PATCH 173/173] feat: implement scheduling triggers --- pkg/controllers/federatedcluster/util.go | 46 +- pkg/controllers/follower/controller.go | 12 +- pkg/controllers/follower/util.go | 31 +- pkg/controllers/scheduler/constants.go | 3 +- pkg/controllers/scheduler/scheduler.go | 11 +- pkg/controllers/scheduler/scheduler_test.go | 14 +- .../scheduler/schedulingtriggers.go | 301 ++++++-- .../scheduler/schedulingtriggers_test.go | 720 ++++++++++++++++++ pkg/controllers/scheduler/schedulingunit.go | 14 +- pkg/util/pod/pod.go | 78 ++ pkg/util/resource/resource.go | 62 ++ .../framework/policies/propagationpolicy.go | 1 - 12 files changed, 1146 insertions(+), 147 deletions(-) create mode 100644 pkg/controllers/scheduler/schedulingtriggers_test.go create mode 100644 pkg/util/pod/pod.go create mode 100644 pkg/util/resource/resource.go diff --git a/pkg/controllers/federatedcluster/util.go b/pkg/controllers/federatedcluster/util.go index e21ab7ae..e751c844 100644 --- a/pkg/controllers/federatedcluster/util.go +++ b/pkg/controllers/federatedcluster/util.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" + "github.com/kubewharf/kubeadmiral/pkg/util/resource" ) func getClusterCondition( @@ -131,47 +132,6 @@ func isNodeSchedulable(node *corev1.Node) bool { return true } -func addResources(src, dest corev1.ResourceList) { - for k, v := range src { - if prevVal, ok := dest[k]; ok { - prevVal.Add(v) - dest[k] = prevVal - } else { - dest[k] = v.DeepCopy() - } - } -} - -// maxResources sets dst to the greater of dst/src for every resource in src -func maxResources(src, dst corev1.ResourceList) { - for name, srcQuantity := range src { - if dstQuantity, ok := dst[name]; !ok || srcQuantity.Cmp(dstQuantity) > 0 { - dst[name] = srcQuantity.DeepCopy() - } - } -} - -// podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers...) + overHead -func getPodResourceRequests(pod *corev1.Pod) corev1.ResourceList { - reqs := make(corev1.ResourceList) - - for _, container := range pod.Spec.Containers { - addResources(container.Resources.Requests, reqs) - } - - for _, container := range pod.Spec.InitContainers { - maxResources(container.Resources.Requests, reqs) - } - - // if PodOverhead feature is supported, add overhead for running a pod - // to the sum of requests and to non-zero limits: - if pod.Spec.Overhead != nil { - addResources(pod.Spec.Overhead, reqs) - } - - return reqs -} - // aggregateResources returns // - allocatable resources from the nodes and, // - available resources after considering allocations to the given pods. 
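The helpers deleted above move into the new pkg/util/resource package (62 lines in the diffstat; the new file itself is not shown in this series). The following sketch reconstructs what the updated call sites rely on from the deleted code; apart from AddResources and GetPodResourceRequests, whose names appear at the call sites, everything here is an assumption:

package resource

import corev1 "k8s.io/api/core/v1"

// AddResources adds every quantity in src into dest.
func AddResources(src, dest corev1.ResourceList) {
	for k, v := range src {
		if prev, ok := dest[k]; ok {
			prev.Add(v)
			dest[k] = prev
		} else {
			dest[k] = v.DeepCopy()
		}
	}
}

// GetPodResourceRequests mirrors the removed getPodResourceRequests but takes
// a *corev1.PodSpec to match the new call sites:
// request = max(sum(containers), each init container) + overhead.
func GetPodResourceRequests(podSpec *corev1.PodSpec) corev1.ResourceList {
	reqs := make(corev1.ResourceList)
	for _, c := range podSpec.Containers {
		AddResources(c.Resources.Requests, reqs)
	}
	for _, c := range podSpec.InitContainers {
		// Each resource must also cover the largest init container request.
		for name, q := range c.Resources.Requests {
			if cur, ok := reqs[name]; !ok || q.Cmp(cur) > 0 {
				reqs[name] = q.DeepCopy()
			}
		}
	}
	if podSpec.Overhead != nil {
		AddResources(podSpec.Overhead, reqs)
	}
	return reqs
}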
@@ -185,7 +145,7 @@ func aggregateResources( continue } - addResources(node.Status.Allocatable, allocatable) + resource.AddResources(node.Status.Allocatable, allocatable) } // Don't consider pod resource for now @@ -201,7 +161,7 @@ func aggregateResources( continue } - podRequests := getPodResourceRequests(pod) + podRequests := resource.GetPodResourceRequests(&pod.Spec) for name, requestedQuantity := range podRequests { if availableQuantity, ok := available[name]; ok { availableQuantity.Sub(requestedQuantity) diff --git a/pkg/controllers/follower/controller.go b/pkg/controllers/follower/controller.go index cc79a486..ab39d28f 100644 --- a/pkg/controllers/follower/controller.go +++ b/pkg/controllers/follower/controller.go @@ -22,8 +22,6 @@ import ( "sync" "time" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -48,6 +46,7 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/util/informermanager" "github.com/kubewharf/kubeadmiral/pkg/util/logging" "github.com/kubewharf/kubeadmiral/pkg/util/naming" + podutil "github.com/kubewharf/kubeadmiral/pkg/util/pod" "github.com/kubewharf/kubeadmiral/pkg/util/worker" ) @@ -66,14 +65,7 @@ var ( // TODO: think about whether PodTemplatePath/PodSpecPath should be specified in the FTC instead. // Specifying in the FTC allows changing the path according to the api version. // Other controllers should consider using the specified paths instead of hardcoded paths. - leaderPodTemplatePaths = map[schema.GroupKind]string{ - {Group: appsv1.GroupName, Kind: common.DeploymentKind}: "spec.template", - {Group: appsv1.GroupName, Kind: common.StatefulSetKind}: "spec.template", - {Group: appsv1.GroupName, Kind: common.DaemonSetKind}: "spec.template", - {Group: batchv1.GroupName, Kind: common.JobKind}: "spec.template", - {Group: batchv1.GroupName, Kind: common.CronJobKind}: "spec.jobTemplate.spec.template", - {Group: "", Kind: common.PodKind}: "spec", - } + leaderPodTemplatePaths = podutil.PodTemplatePaths supportedFollowerTypes = sets.New( schema.GroupKind{Group: "", Kind: common.ConfigMapKind}, diff --git a/pkg/controllers/follower/util.go b/pkg/controllers/follower/util.go index c5977419..bb434ec7 100644 --- a/pkg/controllers/follower/util.go +++ b/pkg/controllers/follower/util.go @@ -19,17 +19,15 @@ package follower import ( "encoding/json" "fmt" - "strings" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" podutil "github.com/kubewharf/kubeadmiral/pkg/lifted/kubernetes/pkg/api/v1/pod" + admiralpodutil "github.com/kubewharf/kubeadmiral/pkg/util/pod" ) type objectGroupKindKey struct { @@ -89,7 +87,7 @@ func getFollowersFromPodTemplate( fedObject fedcorev1a1.GenericFederatedObject, podTemplatePath string, ) (sets.Set[FollowerReference], error) { - podSpec, err := getPodSpec(fedObject, podTemplatePath) + podSpec, err := admiralpodutil.GetPodSpec(fedObject, podTemplatePath) if err != nil { return nil, err } @@ -145,28 +143,3 @@ func getFollowersFromPod( return followers } - -func getPodSpec(fedObject fedcorev1a1.GenericFederatedObject, podTemplatePath string) (*corev1.PodSpec, error) { - if fedObject == nil { - return nil, fmt.Errorf("fedObject is nil") - } - fedObjectPodTemplatePath := 
strings.Split(podTemplatePath, ".") - templateObj := &unstructured.Unstructured{} - err := templateObj.UnmarshalJSON(fedObject.GetSpec().Template.Raw) - if err != nil { - return nil, err - } - - podTemplateMap, found, err := unstructured.NestedMap(templateObj.Object, fedObjectPodTemplatePath...) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("pod template does not exist at path %q", podTemplatePath) - } - podTemplate := &corev1.PodTemplateSpec{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(podTemplateMap, podTemplate); err != nil { - return nil, err - } - return &podTemplate.Spec, nil -} diff --git a/pkg/controllers/scheduler/constants.go b/pkg/controllers/scheduler/constants.go index 460ede03..0a1e85d5 100644 --- a/pkg/controllers/scheduler/constants.go +++ b/pkg/controllers/scheduler/constants.go @@ -46,5 +46,6 @@ const ( EventReasonWebhookConfigurationError = "WebhookConfigurationError" EventReasonWebhookRegistered = "WebhookRegistered" - SchedulingTriggerHashAnnotation = common.DefaultPrefix + "scheduling-trigger-hash" + SchedulingTriggersAnnotation = common.DefaultPrefix + "scheduling-triggers" + SchedulingDeferredReasonsAnnotation = common.DefaultPrefix + "scheduling-deferred-reasons" ) diff --git a/pkg/controllers/scheduler/scheduler.go b/pkg/controllers/scheduler/scheduler.go index 90d79710..eefae0a2 100644 --- a/pkg/controllers/scheduler/scheduler.go +++ b/pkg/controllers/scheduler/scheduler.go @@ -45,7 +45,6 @@ import ( "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/core" "github.com/kubewharf/kubeadmiral/pkg/stats" - "github.com/kubewharf/kubeadmiral/pkg/util/annotation" clusterutil "github.com/kubewharf/kubeadmiral/pkg/util/cluster" "github.com/kubewharf/kubeadmiral/pkg/util/eventhandlers" "github.com/kubewharf/kubeadmiral/pkg/util/eventsink" @@ -415,15 +414,15 @@ func (s *Scheduler) prepareToSchedule( } } - triggerHash, err := s.computeSchedulingTriggerHash(ftc, fedObject, policy, clusters) + trigger, err := computeSchedulingTrigger(ftc, fedObject, policy, clusters) if err != nil { - logger.Error(err, "Failed to compute scheduling trigger hash") + logger.Error(err, "Failed to compute scheduling triggers") return nil, nil, nil, &worker.StatusError } - triggersChanged, err := annotation.AddAnnotation(fedObject, SchedulingTriggerHashAnnotation, triggerHash) + triggersChanged, annotationChanged, err := trigger.updateAnnotationsIfTriggersChanged(fedObject, policy) if err != nil { - logger.Error(err, "Failed to update scheduling trigger hash") + logger.Error(err, "Failed to update scheduling triggers") return nil, nil, nil, &worker.StatusError } @@ -448,7 +447,7 @@ func (s *Scheduler) prepareToSchedule( if updated, err := s.updatePendingControllers(ftc, fedObject, false); err != nil { logger.Error(err, "Failed to update pending controllers") return nil, nil, nil, &worker.StatusError - } else if updated { + } else if updated || annotationChanged { if _, err := fedobjectadapters.Update( ctx, s.fedClient.CoreV1alpha1(), diff --git a/pkg/controllers/scheduler/scheduler_test.go b/pkg/controllers/scheduler/scheduler_test.go index e6e2c06f..08489394 100644 --- a/pkg/controllers/scheduler/scheduler_test.go +++ b/pkg/controllers/scheduler/scheduler_test.go @@ -108,8 +108,10 @@ func TestGetSchedulingUnit(t *testing.T) { AutoMigration: &fedcorev1a1.AutoMigration{ KeepUnschedulableReplicas: false, }, - ReplicaRescheduling: &fedcorev1a1.ReplicaRescheduling{ - 
AvoidDisruption: false, + ReschedulePolicy: &fedcorev1a1.ReschedulePolicy{ + ReplicaRescheduling: &fedcorev1a1.ReplicaRescheduling{ + AvoidDisruption: false, + }, }, }, } @@ -194,10 +196,10 @@ func TestGetSchedulingUnitWithAnnotationOverrides(t *testing.T) { name: "sticky cluster override", policy: &fedcorev1a1.PropagationPolicy{ Spec: fedcorev1a1.PropagationPolicySpec{ - StickyCluster: true, ClusterSelector: map[string]string{ "label": "value1", }, + ReschedulePolicy: &fedcorev1a1.ReschedulePolicy{DisableRescheduling: true}, }, }, annotations: map[string]string{ @@ -215,10 +217,10 @@ func TestGetSchedulingUnitWithAnnotationOverrides(t *testing.T) { name: "Cluster selector override", policy: &fedcorev1a1.PropagationPolicy{ Spec: fedcorev1a1.PropagationPolicySpec{ - StickyCluster: true, ClusterSelector: map[string]string{ "label": "value1", }, + ReschedulePolicy: &fedcorev1a1.ReschedulePolicy{DisableRescheduling: true}, }, }, annotations: map[string]string{ @@ -237,10 +239,10 @@ func TestGetSchedulingUnitWithAnnotationOverrides(t *testing.T) { policy: &fedcorev1a1.PropagationPolicy{ Spec: fedcorev1a1.PropagationPolicySpec{ SchedulingMode: fedcorev1a1.SchedulingModeDuplicate, - StickyCluster: true, ClusterSelector: map[string]string{ "label": "value1", }, + ReschedulePolicy: &fedcorev1a1.ReschedulePolicy{DisableRescheduling: true}, }, }, annotations: map[string]string{ @@ -291,7 +293,6 @@ func TestGetSchedulingUnitWithAnnotationOverrides(t *testing.T) { name: "Tolerations override", policy: &fedcorev1a1.PropagationPolicy{ Spec: fedcorev1a1.PropagationPolicySpec{ - StickyCluster: true, ClusterSelector: map[string]string{ "label": "value1", }, @@ -302,6 +303,7 @@ func TestGetSchedulingUnitWithAnnotationOverrides(t *testing.T) { Effect: corev1.TaintEffectNoExecute, }, }, + ReschedulePolicy: &fedcorev1a1.ReschedulePolicy{DisableRescheduling: true}, }, }, annotations: map[string]string{ diff --git a/pkg/controllers/scheduler/schedulingtriggers.go b/pkg/controllers/scheduler/schedulingtriggers.go index db00812b..0c376271 100644 --- a/pkg/controllers/scheduler/schedulingtriggers.go +++ b/pkg/controllers/scheduler/schedulingtriggers.go @@ -22,7 +22,9 @@ import ( "hash/fnv" "sort" "strconv" + "strings" + "github.com/pkg/errors" "golang.org/x/exp/constraints" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -30,7 +32,10 @@ import ( fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1" "github.com/kubewharf/kubeadmiral/pkg/controllers/common" "github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler/framework" - unstructuredutil "github.com/kubewharf/kubeadmiral/pkg/util/unstructured" + "github.com/kubewharf/kubeadmiral/pkg/util/annotation" + podutil "github.com/kubewharf/kubeadmiral/pkg/util/pod" + resourceutil "github.com/kubewharf/kubeadmiral/pkg/util/resource" + utilunstructured "github.com/kubewharf/kubeadmiral/pkg/util/unstructured" ) /* @@ -49,7 +54,7 @@ Federated object changes: Propagation policy changes: 1. policy creation -2. generation change (spec update) +2. semantics of policy change (see PropagationPolicySpec.ReschedulePolicy.Trigger.PolicyContentChanged for more detail) Cluster changes: 1. 
cluster creation
@@ -82,66 +87,222 @@ type schedulingTriggers struct {
 	// NOTE: Use slices instead of maps for deterministic iteration order
 
-	SchedulingAnnotations []keyValue[string, string] `json:"schedulingAnnotations"`
-	ReplicaCount          int64                      `json:"replicaCount"`
-	ResourceRequest       framework.Resource         `json:"resourceRequest"`
+	SchedulingAnnotationsHash string             `json:"schedulingAnnotationsHash"`
+	ReplicaCount              int64              `json:"replicaCount"`
+	ResourceRequest           framework.Resource `json:"resourceRequest"`
 
 	AutoMigrationInfo *string `json:"autoMigrationInfo,omitempty"`
 
-	PolicyName       string `json:"policyName"`
-	PolicyGeneration int64  `json:"policyGeneration"`
+	PolicyName                  string `json:"policyName"`
+	PolicySchedulingContentHash string `json:"policyContentHash"`
+
+	clusters sets.Set[string]
 
 	// a map from each cluster to its labels
-	ClusterLabels []keyValue[string, []keyValue[string, string]] `json:"clusterLabels"`
+	ClusterLabelsHashes []keyValue[string, string] `json:"clusterLabelsHashes"`
 	// a map from each cluster to its taints
-	ClusterTaints []keyValue[string, []corev1.Taint] `json:"clusterTaints"`
+	ClusterTaintsHashes []keyValue[string, string] `json:"clusterTaintsHashes"`
 	// a map from each cluster to its apiresources
-	ClusterAPIResourceTypes []keyValue[string, []fedcorev1a1.APIResource] `json:"clusterAPIResourceTypes"`
+	ClusterAPIResourceTypesHashes []keyValue[string, string] `json:"clusterAPIResourceTypesHashes"`
 }
 
-func (s *Scheduler) computeSchedulingTriggerHash(
+func (t *schedulingTriggers) JsonMarshal() (string, error) {
+	triggerBytes, err := json.Marshal(t)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal scheduling trigger: %w", err)
+	}
+	return string(triggerBytes), nil
+}
+
+func (t *schedulingTriggers) JsonUnmarshal(v []byte) error {
+	if t == nil {
+		return fmt.Errorf("nil receiver")
+	}
+	trigger := &schedulingTriggers{}
+	err := json.Unmarshal(v, trigger)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal scheduling trigger: %w", err)
+	}
+	clusters := sets.Set[string]{}
+	for _, v := range trigger.ClusterLabelsHashes {
+		clusters.Insert(v.Key)
+	}
+	for _, v := range trigger.ClusterTaintsHashes {
+		clusters.Insert(v.Key)
+	}
+	for _, v := range trigger.ClusterAPIResourceTypesHashes {
+		clusters.Insert(v.Key)
+	}
+	trigger.clusters = clusters
+
+	*t = *trigger
+	return nil
+}
+
+// If a member cluster is removed, we regard that as a change to the per-cluster trigger value.
+// If a cluster joins, however, we do not: the join case is covered by the separate ClusterJoined trigger.
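+// Both hash lists are produced by sortMap and are therefore sorted by cluster
+// name, so a single forward walk over the two slices suffices. For example,
+// old=[{a,h1},{b,h2}] vs. new=[{b,h2}] reports a change because cluster "a"
+// was removed, while old=[{a,h1}] vs. new=[{a,h1},{b,h2}] reports none
+// because only "b" joined.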
+func isClusterTriggerChanged(newClusters, oldClusters []keyValue[string, string]) bool { + newLen, oldLen := len(newClusters), len(oldClusters) + if newLen == 0 { + return oldLen != 0 + } + + for i, j := 0, 0; i < newLen && j < oldLen; { + if newClusters[i].Key != oldClusters[j].Key { + i++ + if newLen-i < oldLen-j { + return true + } + continue + } + if newClusters[i].Value != oldClusters[j].Value { + return true + } + i++ + j++ + } + return false +} + +func (t *schedulingTriggers) updateAnnotationsIfTriggersChanged( + fedObject fedcorev1a1.GenericFederatedObject, + policy fedcorev1a1.GenericPropagationPolicy, +) (triggersChanged, annotationChanged bool, err error) { + triggerText, err := t.JsonMarshal() + if err != nil { + return false, false, err + } + + defer func() { + if triggersChanged { + annotationChanged, err = annotation.AddAnnotation(fedObject, SchedulingTriggersAnnotation, triggerText) + if err != nil { + return + } + _, err = annotation.RemoveAnnotation(fedObject, SchedulingDeferredReasonsAnnotation) + } + }() + + anno := fedObject.GetAnnotations() + if anno == nil { + return true, false, nil + } + + if old, ok := anno[SchedulingTriggersAnnotation]; !ok || old == "" { + return true, false, nil + } else if old == triggerText { + return false, false, nil + } else { + oldTrigger := &schedulingTriggers{} + if err = oldTrigger.JsonUnmarshal([]byte(old)); err != nil { + return false, false, err + } + if t.PolicyName != oldTrigger.PolicyName { + return true, false, nil + } + + reschedulePolicy := policy.GetSpec().ReschedulePolicy + if getIsStickyClusterFromPolicy(policy) || reschedulePolicy.Trigger == nil { + return false, false, nil + } + + policyTrigger := reschedulePolicy.Trigger + var deferredReasons []string + if t.PolicySchedulingContentHash != oldTrigger.PolicySchedulingContentHash { + if !policyTrigger.PolicyContentChanged { + deferredReasons = append(deferredReasons, "policyContentChanged: false") + } else { + triggersChanged = true + } + } + + if isClusterTriggerChanged(t.ClusterLabelsHashes, oldTrigger.ClusterLabelsHashes) { + if !policyTrigger.ClusterLabelsChanged { + deferredReasons = append(deferredReasons, "clusterLabelsChanged: false") + } else { + triggersChanged = true + } + } + + if isClusterTriggerChanged(t.ClusterTaintsHashes, oldTrigger.ClusterTaintsHashes) { + triggersChanged = true + } + + if isClusterTriggerChanged(t.ClusterAPIResourceTypesHashes, oldTrigger.ClusterAPIResourceTypesHashes) { + if !policyTrigger.ClusterAPIResourcesChanged { + deferredReasons = append(deferredReasons, "clusterAPIResourcesChanged: false") + } else { + triggersChanged = true + } + } + + if t.clusters.IsSuperset(oldTrigger.clusters) && len(t.clusters) != len(oldTrigger.clusters) { + if !policyTrigger.ClusterJoined { + deferredReasons = append(deferredReasons, "clusterJoined: false") + } else { + triggersChanged = true + } + } + + if triggersChanged { + return true, false, nil + } + annotationChanged, err = annotation.AddAnnotation(fedObject, SchedulingDeferredReasonsAnnotation, strings.Join(deferredReasons, ";")) + return false, annotationChanged, err + } +} + +func computeSchedulingTrigger( ftc *fedcorev1a1.FederatedTypeConfig, fedObject fedcorev1a1.GenericFederatedObject, policy fedcorev1a1.GenericPropagationPolicy, clusters []*fedcorev1a1.FederatedCluster, -) (string, error) { - trigger := &schedulingTriggers{} +) (*schedulingTriggers, error) { + trigger := &schedulingTriggers{clusters: sets.New[string]()} var err error - trigger.SchedulingAnnotations = 
getSchedulingAnnotations(fedObject) + trigger.SchedulingAnnotationsHash, err = getSchedulingAnnotationsHash(fedObject) + if err != nil { + return nil, fmt.Errorf("failed to get scheduling annotations: %w", err) + } if trigger.ReplicaCount, err = getReplicaCount(ftc, fedObject); err != nil { - return "", fmt.Errorf("failed to get object replica count: %w", err) + return nil, fmt.Errorf("failed to get object replica count: %w", err) + } + if trigger.ResourceRequest, err = getResourceRequest(ftc, fedObject); err != nil { + return nil, fmt.Errorf("failed to get object resource request: %w", err) } - trigger.ResourceRequest = getResourceRequest(fedObject) if policy != nil { trigger.PolicyName = policy.GetName() - trigger.PolicyGeneration = policy.GetGeneration() if policy.GetSpec().AutoMigration != nil { // Only consider auto-migration annotation when auto-migration is enabled in the policy. if value, exists := fedObject.GetAnnotations()[common.AutoMigrationInfoAnnotation]; exists { trigger.AutoMigrationInfo = &value } } + if trigger.PolicySchedulingContentHash, err = getPolicySchedulingContentHash(policy.GetSpec()); err != nil { + return nil, fmt.Errorf("failed to get scheduling content of policy %s: %w", policy.GetName(), err) + } } - trigger.ClusterLabels = getClusterLabels(clusters) - trigger.ClusterTaints = getClusterTaints(clusters) - trigger.ClusterAPIResourceTypes = getClusterAPIResourceTypes(clusters) + for _, cluster := range clusters { + trigger.clusters.Insert(cluster.Name) + } - triggerBytes, err := json.Marshal(trigger) + trigger.ClusterLabelsHashes, err = getClusterLabelsHashes(clusters) if err != nil { - return "", fmt.Errorf("failed to compute scheduling trigger hash: %w", err) + return nil, fmt.Errorf("failed to get cluster labels hashes: %w", err) } - - hash := fnv.New32() - if _, err = hash.Write(triggerBytes); err != nil { - return "", fmt.Errorf("failed to compute scheduling trigger hash: %w", err) + trigger.ClusterTaintsHashes, err = getClusterTaintsHashes(clusters) + if err != nil { + return nil, fmt.Errorf("failed to get cluster taints hashes: %w", err) + } + trigger.ClusterAPIResourceTypesHashes, err = getClusterAPIResourceTypesHashes(clusters) + if err != nil { + return nil, fmt.Errorf("failed to get cluster API resource types hashes: %w", err) } - triggerHash := strconv.FormatInt(int64(hash.Sum32()), 10) - return triggerHash, nil + return trigger, nil } var knownSchedulingAnnotations = sets.New( @@ -155,14 +316,29 @@ var knownSchedulingAnnotations = sets.New( FollowsObjectAnnotation, ) -func getSchedulingAnnotations(fedObject fedcorev1a1.GenericFederatedObject) []keyValue[string, string] { +func getSchedulingAnnotationsHash(fedObject fedcorev1a1.GenericFederatedObject) (string, error) { result := map[string]string{} for k, v := range fedObject.GetAnnotations() { if knownSchedulingAnnotations.Has(k) { result[k] = v } } - return sortMap(result) + return hashResult(sortMap(result)) +} + +func hashResult(v any) (string, error) { + hashBytes, err := json.Marshal(v) + if err != nil { + return "", fmt.Errorf("failed to compute scheduling trigger hash: %w", err) + } + + hash := fnv.New32() + if _, err = hash.Write(hashBytes); err != nil { + return "", fmt.Errorf("failed to compute scheduling trigger hash: %w", err) + } + result := strconv.FormatInt(int64(hash.Sum32()), 10) + + return result, nil } func getReplicaCount( @@ -178,7 +354,7 @@ func getReplicaCount( return 0, err } - value, err := unstructuredutil.GetInt64FromPath(template, ftc.Spec.PathDefinition.ReplicasSpec, 
nil)
+	value, err := utilunstructured.GetInt64FromPath(template, ftc.Spec.PathDefinition.ReplicasSpec, nil)
 	if err != nil || value == nil {
 		return 0, err
 	}
@@ -186,21 +362,44 @@
 	return *value, nil
 }
 
-func getResourceRequest(fedObject fedcorev1a1.GenericFederatedObject) framework.Resource {
-	// TODO: update once we have a proper way to obtian resource request from federated objects
-	return framework.Resource{}
+func getResourceRequest(
+	ftc *fedcorev1a1.FederatedTypeConfig,
+	fedObject fedcorev1a1.GenericFederatedObject,
+) (framework.Resource, error) {
+	gvk := ftc.GetSourceTypeGVK()
+	podSpec, err := podutil.GetResourcePodSpec(fedObject, gvk)
+	if err != nil {
+		if errors.Is(err, podutil.ErrUnknownTypeToGetPodTemplate) {
+			// TODO: update once we have a proper way to obtain resource request from federated objects
+			return framework.Resource{}, nil
+		}
+		return framework.Resource{}, err
+	}
+	resource := resourceutil.GetPodResourceRequests(podSpec)
+	return *framework.NewResource(resource), nil
 }
 
-func getClusterLabels(clusters []*fedcorev1a1.FederatedCluster) []keyValue[string, []keyValue[string, string]] {
-	ret := make(map[string][]keyValue[string, string], len(clusters))
+func getPolicySchedulingContentHash(policySpec *fedcorev1a1.PropagationPolicySpec) (string, error) {
+	policySpec = policySpec.DeepCopy()
+	policySpec.DisableFollowerScheduling = false
+	policySpec.AutoMigration = nil
+	return hashResult(policySpec)
+}
+
+func getClusterLabelsHashes(clusters []*fedcorev1a1.FederatedCluster) ([]keyValue[string, string], error) {
+	ret := make(map[string]string, len(clusters))
 	for _, cluster := range clusters {
-		ret[cluster.Name] = sortMap(cluster.GetLabels())
+		hash, err := hashResult(sortMap(cluster.GetLabels()))
+		if err != nil {
+			return nil, err
+		}
+		ret[cluster.Name] = hash
 	}
-	return sortMap(ret)
+	return sortMap(ret), nil
 }
 
-func getClusterTaints(clusters []*fedcorev1a1.FederatedCluster) []keyValue[string, []corev1.Taint] {
-	ret := make(map[string][]corev1.Taint, len(clusters))
+func getClusterTaintsHashes(clusters []*fedcorev1a1.FederatedCluster) ([]keyValue[string, string], error) {
+	ret := make(map[string]string, len(clusters))
 	for _, cluster := range clusters {
 		taints := make([]corev1.Taint, len(cluster.Spec.Taints))
 		for i, t := range cluster.Spec.Taints {
@@ -226,15 +425,17 @@ func getClusterTaints(clusters []*fedcorev1a1.FederatedClusterin
 				return false
 			}
 		})
-		ret[cluster.Name] = taints
+		hash, err := hashResult(taints)
+		if err != nil {
+			return nil, err
+		}
+		ret[cluster.Name] = hash
 	}
-	return sortMap(ret)
+	return sortMap(ret), nil
 }
 
-func getClusterAPIResourceTypes(
-	clusters []*fedcorev1a1.FederatedCluster,
-) []keyValue[string, []fedcorev1a1.APIResource] {
-	ret := make(map[string][]fedcorev1a1.APIResource)
+func getClusterAPIResourceTypesHashes(clusters []*fedcorev1a1.FederatedCluster) ([]keyValue[string, string], error) {
+	ret := make(map[string]string, len(clusters))
 	for _, cluster := range clusters {
 		types := make([]fedcorev1a1.APIResource, len(cluster.Status.APIResourceTypes))
@@ -259,7 +460,11 @@
 			}
 		})
 
-		ret[cluster.Name] = types
+		hash, err := hashResult(types)
+		if err != nil {
+			return nil, err
+		}
+		ret[cluster.Name] = hash
 	}
-	return sortMap(ret)
+	return sortMap(ret), nil
 }
diff --git a/pkg/controllers/scheduler/schedulingtriggers_test.go b/pkg/controllers/scheduler/schedulingtriggers_test.go
new file mode 100644
index 00000000..27ccc108
--- /dev/null
+++
diff --git a/pkg/controllers/scheduler/schedulingtriggers_test.go b/pkg/controllers/scheduler/schedulingtriggers_test.go
new file mode 100644
index 00000000..27ccc108
--- /dev/null
+++ b/pkg/controllers/scheduler/schedulingtriggers_test.go
@@ -0,0 +1,720 @@
+package scheduler
+
+import (
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
+	"github.com/kubewharf/kubeadmiral/pkg/util/annotation"
+)
+
+var (
+	dpFTC = &v1alpha1.FederatedTypeConfig{
+		Spec: v1alpha1.FederatedTypeConfigSpec{
+			PathDefinition: v1alpha1.PathDefinition{
+				ReplicasSpec: "spec.replicas",
+			},
+			SourceType: v1alpha1.APIResource{
+				Group:      "apps",
+				Version:    "v1",
+				Kind:       "Deployment",
+				PluralName: "deployments",
+				Scope:      "Namespaced",
+			},
+		},
+	}
+	fdp = &v1alpha1.FederatedObject{
+		Spec: v1alpha1.GenericFederatedObjectSpec{
+			Template: apiextensionsv1.JSON{Raw: []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"labels":{},"name":"nginx-deployment","namespace":"default"},"spec":{"progressDeadlineSeconds":600,"replicas":2,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"nginx"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"nginx"}},"spec":{"containers":[{"image":"nginx:1.14.2","imagePullPolicy":"IfNotPresent","name":"nginx","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"serviceAccount":"wy","serviceAccountName":"wy","terminationGracePeriodSeconds":30}}}}`)},
+		},
+	}
+
+	saFTC = &v1alpha1.FederatedTypeConfig{
+		Spec: v1alpha1.FederatedTypeConfigSpec{
+			SourceType: v1alpha1.APIResource{
+				Group:      "",
+				Version:    "v1",
+				Kind:       "ServiceAccount",
+				PluralName: "serviceaccounts",
+				Scope:      "Namespaced",
+			},
+		},
+	}
+	fsa = &v1alpha1.FederatedObject{
+		Spec: v1alpha1.GenericFederatedObjectSpec{
+			Template: apiextensionsv1.JSON{Raw: []byte(`{"apiVersion":"v1","automountServiceAccountToken":false,"kind":"ServiceAccount","metadata":{"annotations":{},"labels":{},"name":"wy","namespace":"default"}}`)},
+		},
+	}
+)
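+
+// Note: dpFTC/fdp exercise the replica-count and pod resource-request code
+// paths of trigger computation, while saFTC/fsa cover source types that
+// carry no replicas or pod template.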
+
+func Test_isClusterTriggerChanged(t *testing.T) {
+	tests := []struct {
+		name        string
+		newClusters []keyValue[string, string]
+		oldClusters []keyValue[string, string]
+		want        bool
+	}{
+		{"new 0, old 0", nil, nil, false},
+		{"new 0, old 1", nil, []keyValue[string, string]{{"", ""}}, true},
+		{"new 1, old 0", []keyValue[string, string]{{"", ""}}, nil, false},
+		{"new 1, old 1, unchanged", []keyValue[string, string]{{"", ""}}, []keyValue[string, string]{{"", ""}}, false},
+		{"new 1, old 1, value changed", []keyValue[string, string]{{"1", "1"}}, []keyValue[string, string]{{"1", ""}}, true},
+		{"new 1, old 1, key changed", []keyValue[string, string]{{"2", "1"}}, []keyValue[string, string]{{"1", "1"}}, true},
+		{"new 2, old 1, unchanged 1", []keyValue[string, string]{{"1", "1"}, {"2", "2"}}, []keyValue[string, string]{{"1", "1"}}, false},
+		{"new 2, old 1, unchanged 2", []keyValue[string, string]{{"1", "1"}, {"2", "2"}}, []keyValue[string, string]{{"2", "2"}}, false},
+		{"new 2, old 1, value1 changed", []keyValue[string, string]{{"1", "1"}, {"2", "2"}}, []keyValue[string, string]{{"1", "2"}}, true},
+		{"new 2, old 1, value2 changed", []keyValue[string, string]{{"1", "1"}, {"2", "2"}}, []keyValue[string, string]{{"2", "1"}}, true},
+		{"new 2, old 1, key changed", []keyValue[string, string]{{"1", "1"}, {"2", "2"}}, []keyValue[string, string]{{"3", "3"}}, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := isClusterTriggerChanged(tt.newClusters, tt.oldClusters); got != tt.want {
+				t.Errorf("isClusterTriggerChanged() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_schedulingTriggers_updateAnnotationsIfTriggersChanged(t *testing.T) {
+	type Status struct {
+		policyName       string
+		reschedulePolicy *v1alpha1.ReschedulePolicy
+
+		clusterNames        []string
+		clusterLabels       []map[string]string
+		clusterTaints       [][]corev1.Taint
+		clusterApiResources [][]v1alpha1.APIResource
+	}
+
+	tests := []struct {
+		name string
+
+		withReplicas           bool
+		withOldTrigger         bool
+		oldStatus              Status
+		withOldDeferredReasons bool
+		oldDeferredReasons     string
+
+		newStatus Status
+
+		wantTriggersChanged   bool
+		wantAnnotationChanged bool
+		wantErr               bool
+	}{
+		{
+			name:                   "dp, new schedule",
+			withReplicas:           true,
+			withOldTrigger:         false,
+			oldStatus:              Status{},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName:          "pp1",
+				reschedulePolicy:    &v1alpha1.ReschedulePolicy{DisableRescheduling: true},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   true,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change pp",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName:          "pp0",
+				reschedulePolicy:    &v1alpha1.ReschedulePolicy{DisableRescheduling: true},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName:          "pp1",
+				reschedulePolicy:    &v1alpha1.ReschedulePolicy{DisableRescheduling: true},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   true,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change pp PolicyContentChanged from true to false",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					PolicyContentChanged: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					PolicyContentChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change pp PolicyContentChanged from true to false, with unchanged deferred reasons",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					PolicyContentChanged: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: true,
+			oldDeferredReasons:     "policyContentChanged: false",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					PolicyContentChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: false,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change pp PolicyContentChanged from false to true",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					PolicyContentChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					PolicyContentChanged: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   true,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, join clusters, with ClusterJoined",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterJoined: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterJoined: true,
+				}},
+				clusterNames:        []string{"cluster1", "cluster2"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   true,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, join clusters, without ClusterJoined",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterJoined: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterJoined: false,
+				}},
+				clusterNames:        []string{"cluster1", "cluster2"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, join clusters with unchanged deferred reasons, without ClusterJoined",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterJoined: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: true,
+			oldDeferredReasons:     "clusterJoined: false",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterJoined: false,
+				}},
+				clusterNames:        []string{"cluster1", "cluster2"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: false,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change cluster labels, with ClusterLabelsChanged",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterLabelsChanged: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       []map[string]string{{"foo": "bar"}},
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterLabelsChanged: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       []map[string]string{{"bar": "foo"}},
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   true,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change cluster labels, without ClusterLabelsChanged",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterLabelsChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       []map[string]string{{"foo": "bar"}},
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterLabelsChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       []map[string]string{{"bar": "foo"}},
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change cluster labels with unchanged deferred reasons, without ClusterLabelsChanged",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterLabelsChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       []map[string]string{{"foo": "bar"}},
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: true,
+			oldDeferredReasons:     "clusterLabelsChanged: false",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterLabelsChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       []map[string]string{{"bar": "foo"}},
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: false,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change cluster apiresources, with ClusterAPIResourcesChanged",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterAPIResourcesChanged: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "A", Version: "v1", PluralName: "as"}}},
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterAPIResourcesChanged: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "B", Version: "v1", PluralName: "bs"}}},
+			},
+			wantTriggersChanged:   true,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change cluster apiresources, without ClusterAPIResourcesChanged",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterAPIResourcesChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "A", Version: "v1", PluralName: "as"}}},
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterAPIResourcesChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "B", Version: "v1", PluralName: "bs"}}},
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change cluster apiresources with unchanged deferred reasons, without ClusterAPIResourcesChanged",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterAPIResourcesChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "A", Version: "v1", PluralName: "as"}}},
+			},
+			withOldDeferredReasons: true,
+			oldDeferredReasons:     "clusterAPIResourcesChanged: false",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterAPIResourcesChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "B", Version: "v1", PluralName: "bs"}}},
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: false,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change pp contents, join cluster, change labels and apiresources, disabled all triggers",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName:          "pp0",
+				reschedulePolicy:    &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       []map[string]string{{"foo": "bar"}},
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "A", Version: "v1", PluralName: "as"}}},
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName:          "pp0",
+				reschedulePolicy:    &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{}, ReplicaRescheduling: &v1alpha1.ReplicaRescheduling{}},
+				clusterNames:        []string{"cluster1", "cluster2"},
+				clusterLabels:       []map[string]string{{"bar": "foo"}},
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "B", Version: "v1", PluralName: "bs"}}},
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change pp, join cluster, change labels and apiresources, with unchanged deferred reasons, disabled all triggers",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName:          "pp0",
+				reschedulePolicy:    &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       []map[string]string{{"foo": "bar"}},
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "A", Version: "v1", PluralName: "as"}}},
+			},
+			withOldDeferredReasons: true,
+			oldDeferredReasons:     "policyContentChanged: false;clusterLabelsChanged: false;clusterAPIResourcesChanged: false;clusterJoined: false",
+			newStatus: Status{
+				policyName:          "pp0",
+				reschedulePolicy:    &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{}, ReplicaRescheduling: &v1alpha1.ReplicaRescheduling{}},
+				clusterNames:        []string{"cluster1", "cluster2"},
+				clusterLabels:       []map[string]string{{"bar": "foo"}},
+				clusterTaints:       nil,
+				clusterApiResources: [][]v1alpha1.APIResource{{{Kind: "B", Version: "v1", PluralName: "bs"}}},
+			},
+			wantTriggersChanged:   false,
+			wantAnnotationChanged: false,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, change cluster taints",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterAPIResourcesChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       [][]corev1.Taint{{{Key: "foo", Value: "bar", Effect: corev1.TaintEffectNoSchedule}}},
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					ClusterAPIResourcesChanged: false,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       [][]corev1.Taint{{{Key: "foo", Value: "bar", Effect: corev1.TaintEffectNoExecute}}},
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   true,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+		{
+			name:           "dp, remove clusters",
+			withReplicas:   true,
+			withOldTrigger: true,
+			oldStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					PolicyContentChanged: true,
+				}},
+				clusterNames:        []string{"cluster1"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			withOldDeferredReasons: false,
+			oldDeferredReasons:     "",
+			newStatus: Status{
+				policyName: "pp0",
+				reschedulePolicy: &v1alpha1.ReschedulePolicy{Trigger: &v1alpha1.RescheduleTrigger{
+					PolicyContentChanged: true,
+				}},
+				clusterNames:        []string{"cluster2"},
+				clusterLabels:       nil,
+				clusterTaints:       nil,
+				clusterApiResources: nil,
+			},
+			wantTriggersChanged:   true,
+			wantAnnotationChanged: true,
+			wantErr:               false,
+		},
+	}
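+
+	// Each case recomputes the scheduling trigger from the "new" state and
+	// compares it against the annotations written for the "old" state;
+	// trigger changes that the ReschedulePolicy gates off should surface as
+	// deferred reasons rather than as a reschedule.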
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ftc, fedObj := generateFTCAndFedObj(tt.withReplicas)
+			if tt.withOldTrigger {
+				oldPolicy := generatePolicy(tt.oldStatus.policyName, tt.oldStatus.reschedulePolicy)
+				oldClusters := generateFClusters(tt.oldStatus.clusterNames, tt.oldStatus.clusterLabels, tt.oldStatus.clusterTaints, tt.oldStatus.clusterApiResources)
+				oldTrigger, err := computeSchedulingTrigger(ftc, fedObj, oldPolicy, oldClusters)
+				if err != nil {
+					t.Errorf("computeSchedulingTrigger() unexpected err: %v", err)
+					return
+				}
+				oldTriggerText, err := oldTrigger.JsonMarshal()
+				if err != nil {
+					t.Errorf("JsonMarshal() unexpected err: %v", err)
+					return
+				}
+				_, err = annotation.AddAnnotation(fedObj, SchedulingTriggersAnnotation, oldTriggerText)
+				if err != nil {
+					t.Errorf("AddAnnotation() unexpected err: %v", err)
+					return
+				}
+			}
+			if tt.withOldDeferredReasons {
+				_, err := annotation.AddAnnotation(fedObj, SchedulingDeferredReasonsAnnotation, tt.oldDeferredReasons)
+				if err != nil {
+					t.Errorf("AddAnnotation() unexpected err: %v", err)
+					return
+				}
+			}
+
+			newPolicy := generatePolicy(tt.newStatus.policyName, tt.newStatus.reschedulePolicy)
+			newClusters := generateFClusters(tt.newStatus.clusterNames, tt.newStatus.clusterLabels, tt.newStatus.clusterTaints, tt.newStatus.clusterApiResources)
+			newTrigger, err := computeSchedulingTrigger(ftc, fedObj, newPolicy, newClusters)
+			if err != nil {
+				t.Errorf("computeSchedulingTrigger() unexpected err: %v", err)
+				return
+			}
+
+			gotTriggersChanged, gotAnnotationChanged, err := newTrigger.updateAnnotationsIfTriggersChanged(fedObj, newPolicy)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("updateAnnotationsIfTriggersChanged() error = %v, wantErr = %v", err, tt.wantErr)
+				return
+			}
+			if gotTriggersChanged != tt.wantTriggersChanged {
+				t.Errorf("updateAnnotationsIfTriggersChanged() gotTriggersChanged = %v, want = %v", gotTriggersChanged, tt.wantTriggersChanged)
+			}
+			if gotAnnotationChanged != tt.wantAnnotationChanged {
+				t.Errorf("updateAnnotationsIfTriggersChanged() gotAnnotationChanged = %v, want = %v", gotAnnotationChanged, tt.wantAnnotationChanged)
+			}
+		})
+	}
+}
+
+func generateFTCAndFedObj(withReplicas bool) (ftc *v1alpha1.FederatedTypeConfig, fedObj v1alpha1.GenericFederatedObject) {
+	if !withReplicas {
+		return saFTC.DeepCopy(), fsa.DeepCopy()
+	}
+	return dpFTC.DeepCopy(), fdp.DeepCopy()
+}
+
+func generatePolicy(name string, reschedulePolicy *v1alpha1.ReschedulePolicy) v1alpha1.GenericPropagationPolicy {
+	return &v1alpha1.PropagationPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec: v1alpha1.PropagationPolicySpec{ReschedulePolicy: reschedulePolicy},
+	}
+}
+
+func generateFClusters(
+	names []string,
+	labels []map[string]string,
+	taints [][]corev1.Taint,
+	apiResources [][]v1alpha1.APIResource,
+) []*v1alpha1.FederatedCluster {
+	clusters := make([]*v1alpha1.FederatedCluster, 0, len(names))
+	for i, name := range names {
+		cluster := &v1alpha1.FederatedCluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: name,
+			},
+		}
+		if i < len(labels) {
+			cluster.Labels = labels[i]
+		}
+		if i < len(taints) {
+			cluster.Spec.Taints = taints[i]
+		}
+		if i < len(apiResources) {
+			cluster.Status.APIResourceTypes = apiResources[i]
+		}
+		clusters = append(clusters, cluster)
+	}
+	return clusters
+}
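The schedulingunit.go changes below follow one compatibility pattern: prefer the new ReschedulePolicy block when it is set, otherwise fall back to the legacy top-level policy fields. A hedged sketch of that pattern, using hypothetical stand-in types rather than the project's actual API structs:

package main

import "fmt"

type ReplicaRescheduling struct{ AvoidDisruption bool }

type ReschedulePolicy struct {
	DisableRescheduling bool
	ReplicaRescheduling *ReplicaRescheduling
}

type PolicySpec struct {
	StickyCluster       bool                 // legacy field
	ReplicaRescheduling *ReplicaRescheduling // legacy field
	ReschedulePolicy    *ReschedulePolicy    // new unified block; takes precedence when set
}

func isStickyCluster(spec PolicySpec) bool {
	if rp := spec.ReschedulePolicy; rp != nil {
		return rp.DisableRescheduling
	}
	return spec.StickyCluster
}

func main() {
	legacy := PolicySpec{StickyCluster: true}
	unified := PolicySpec{StickyCluster: true, ReschedulePolicy: &ReschedulePolicy{}}
	// An empty ReschedulePolicy deliberately overrides the legacy field.
	fmt.Println(isStickyCluster(legacy), isStickyCluster(unified)) // true false
}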
diff --git a/pkg/controllers/scheduler/schedulingunit.go b/pkg/controllers/scheduler/schedulingunit.go
index a69fc87f..184cd872 100644
--- a/pkg/controllers/scheduler/schedulingunit.go
+++ b/pkg/controllers/scheduler/schedulingunit.go
@@ -91,9 +91,7 @@ func schedulingUnitForFedObject(
 		}
 	}
 
-	if replicaRescheduling := policy.GetSpec().ReplicaRescheduling; replicaRescheduling != nil {
-		schedulingUnit.AvoidDisruption = replicaRescheduling.AvoidDisruption
-	}
+	schedulingUnit.AvoidDisruption = getIsAvoidDisruptionFromPolicy(policy)
 
 	schedulingUnit.SchedulingMode = schedulingMode
@@ -240,9 +238,19 @@ func getAutoMigrationInfo(fedObject fedcorev1a1.GenericFederatedObject) (*framew
 }
 
 func getIsStickyClusterFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) bool {
+	if rp := policy.GetSpec().ReschedulePolicy; rp != nil {
+		return rp.DisableRescheduling
+	}
 	return policy.GetSpec().StickyCluster
 }
 
+func getIsAvoidDisruptionFromPolicy(policy fedcorev1a1.GenericPropagationPolicy) bool {
+	if rp := policy.GetSpec().ReschedulePolicy; rp != nil {
+		return rp.ReplicaRescheduling != nil && rp.ReplicaRescheduling.AvoidDisruption
+	}
+	return policy.GetSpec().ReplicaRescheduling != nil && policy.GetSpec().ReplicaRescheduling.AvoidDisruption
+}
+
 func getIsStickyClusterFromObject(object fedcorev1a1.GenericFederatedObject) (bool, bool) {
 	// TODO: consider passing in the annotations directly to prevent incurring a deep copy for each call
 	annotations := object.GetAnnotations()
diff --git a/pkg/util/pod/pod.go b/pkg/util/pod/pod.go
new file mode 100644
index 00000000..ab542fce
--- /dev/null
+++ b/pkg/util/pod/pod.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2023 The KubeAdmiral Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pod
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	appsv1 "k8s.io/api/apps/v1"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	pkgruntime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
+	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
+)
+
+var ErrUnknownTypeToGetPodTemplate = errors.New("unknown type to get pod template")
+
+// PodTemplatePaths maps each supported type to its pod template path.
+// TODO: think about whether PodTemplatePath/PodSpecPath should be specified in the FTC instead.
+// Specifying it in the FTC allows changing the path according to the API version.
+// Other controllers should consider using the specified paths instead of hardcoded paths.
+var PodTemplatePaths = map[schema.GroupKind]string{
+	{Group: appsv1.GroupName, Kind: common.DeploymentKind}:  "spec.template",
+	{Group: appsv1.GroupName, Kind: common.StatefulSetKind}: "spec.template",
+	{Group: appsv1.GroupName, Kind: common.DaemonSetKind}:   "spec.template",
+	{Group: batchv1.GroupName, Kind: common.JobKind}:        "spec.template",
+	{Group: batchv1.GroupName, Kind: common.CronJobKind}:    "spec.jobTemplate.spec.template",
+	{Group: corev1.GroupName, Kind: common.PodKind}:         "spec",
+}
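+
+// For example, a Deployment-backed federated object resolves through
+// PodTemplatePaths[{Group: "apps", Kind: "Deployment"}] to "spec.template",
+// and GetPodSpec below decodes the value found at that path into a
+// corev1.PodTemplateSpec before returning its PodSpec.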
+
+func GetPodSpec(fedObject fedcorev1a1.GenericFederatedObject, podTemplatePath string) (*corev1.PodSpec, error) {
+	if fedObject == nil {
+		return nil, fmt.Errorf("fedObject is nil")
+	}
+	unsFedObject, err := fedObject.GetSpec().GetTemplateAsUnstructured()
+	if err != nil {
+		return nil, err
+	}
+	podTemplateMap, found, err := unstructured.NestedMap(unsFedObject.Object, strings.Split(podTemplatePath, ".")...)
+	if err != nil {
+		return nil, err
+	}
+	if !found {
+		return nil, fmt.Errorf("pod template does not exist at path %q", podTemplatePath)
+	}
+	podTemplate := &corev1.PodTemplateSpec{}
+	if err := pkgruntime.DefaultUnstructuredConverter.FromUnstructured(podTemplateMap, podTemplate); err != nil {
+		return nil, err
+	}
+	return &podTemplate.Spec, nil
+}
+
+func GetResourcePodSpec(fedObject fedcorev1a1.GenericFederatedObject, gvk schema.GroupVersionKind) (*corev1.PodSpec, error) {
+	path, ok := PodTemplatePaths[gvk.GroupKind()]
+	if !ok {
+		return nil, fmt.Errorf("%w: %s", ErrUnknownTypeToGetPodTemplate, gvk.String())
+	}
+	return GetPodSpec(fedObject, path)
+}
diff --git a/pkg/util/resource/resource.go b/pkg/util/resource/resource.go
new file mode 100644
index 00000000..9a994ece
--- /dev/null
+++ b/pkg/util/resource/resource.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2023 The KubeAdmiral Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// AddResources adds each quantity in src to the matching quantity in dest.
+func AddResources(src, dest corev1.ResourceList) {
+	for k, v := range src {
+		if prevVal, ok := dest[k]; ok {
+			prevVal.Add(v)
+			dest[k] = prevVal
+		} else {
+			dest[k] = v.DeepCopy()
+		}
+	}
+}
+
+// MaxResources sets dst to the greater of dst/src for every resource in src.
+func MaxResources(src, dst corev1.ResourceList) {
+	for name, srcQuantity := range src {
+		if dstQuantity, ok := dst[name]; !ok || srcQuantity.Cmp(dstQuantity) > 0 {
+			dst[name] = srcQuantity.DeepCopy()
+		}
+	}
+}
+
+// GetPodResourceRequests returns the effective resource requests of a pod:
+// podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers...) + overhead
+func GetPodResourceRequests(podSpec *corev1.PodSpec) corev1.ResourceList {
+	reqs := make(corev1.ResourceList)
+
+	for _, container := range podSpec.Containers {
+		AddResources(container.Resources.Requests, reqs)
+	}
+
+	for _, container := range podSpec.InitContainers {
+		MaxResources(container.Resources.Requests, reqs)
+	}
+
+	// if the PodOverhead feature is supported, add the overhead for running
+	// a pod to the sum of requests and to non-zero limits:
+	if podSpec.Overhead != nil {
+		AddResources(podSpec.Overhead, reqs)
+	}
+
+	return reqs
+}
diff --git a/test/e2e/framework/policies/propagationpolicy.go b/test/e2e/framework/policies/propagationpolicy.go
index 22c3986c..2b9de1bf 100644
--- a/test/e2e/framework/policies/propagationpolicy.go
+++ b/test/e2e/framework/policies/propagationpolicy.go
@@ -38,7 +38,6 @@ func PropagationPolicyForClustersWithPlacements(
 		},
 		Spec: fedcorev1a1.PropagationPolicySpec{
 			SchedulingMode: fedcorev1a1.SchedulingModeDuplicate,
-			StickyCluster:  false,
 			Placements:     []fedcorev1a1.ClusterReference{},
 		},
 	}
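As a usage note for GetPodResourceRequests above: requests of app containers are summed, while each init container only raises the running maximum, since init containers execute sequentially. A hedged sketch of calling the new helper (the resourceutil import path is assumed from this patch's package layout):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	resourceutil "github.com/kubewharf/kubeadmiral/pkg/util/resource"
)

func main() {
	podSpec := &corev1.PodSpec{
		Containers: []corev1.Container{
			{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{
				corev1.ResourceCPU: resource.MustParse("500m"),
			}}},
			{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{
				corev1.ResourceCPU: resource.MustParse("250m"),
			}}},
		},
		// Init containers run one at a time, so only their per-container
		// maximum contributes to the effective request.
		InitContainers: []corev1.Container{
			{Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{
				corev1.ResourceCPU: resource.MustParse("1"),
			}}},
		},
	}

	reqs := resourceutil.GetPodResourceRequests(podSpec)
	fmt.Println(reqs.Cpu().String()) // "1": max(500m+250m, 1) = 1 CPU
}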