diff --git a/docs/api-references/docs.md b/docs/api-references/docs.md
index d4404d4d728..561b6e71468 100644
--- a/docs/api-references/docs.md
+++ b/docs/api-references/docs.md
@@ -4733,6 +4733,9 @@ All topologySpreadConstraints are ANDed.
+ComponentStatus
+
+
ConfigMapRef
(Appears on:
@@ -8343,6 +8346,20 @@ map[github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageVolumeName
Volumes contains the status of all volumes.
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+ |
+
+(Optional)
+ Represents the latest available observations of a component’s state.
+ |
+
MemberPhase
@@ -10677,6 +10694,20 @@ map[github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageVolumeName
Volumes contains the status of all volumes.
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+ |
+
+(Optional)
+ Represents the latest available observations of a component’s state.
+ |
+
PDStoreLabel
@@ -11896,6 +11927,20 @@ map[github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageVolumeName
Volumes contains the status of all volumes.
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+ |
+
+(Optional)
+ Represents the latest available observations of a component’s state.
+ |
+
QueueConfig
@@ -14319,6 +14364,20 @@ map[github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageVolumeName
Volumes contains the status of all volumes.
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+ |
+
+(Optional)
+ Represents the latest available observations of a component’s state.
+ |
+
TiDBAccessConfig
@@ -15707,6 +15766,20 @@ map[github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageVolumeName
Volumes contains the status of all volumes.
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+ |
+
+(Optional)
+ Represents the latest available observations of a component’s state.
+ |
+
TiDBTLSClient
@@ -20340,6 +20413,20 @@ map[github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageVolumeName
Volumes contains the status of all volumes.
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+ |
+
+(Optional)
+ Represents the latest available observations of a component’s state.
+ |
+
TiKVStorageConfig
@@ -23699,6 +23786,20 @@ map[github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageVolumeName
Volumes contains the status of all volumes.
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+ |
+
+(Optional)
+ Represents the latest available observations of a component’s state.
+ |
+
diff --git a/manifests/crd.yaml b/manifests/crd.yaml
index d04f69fafd1..fb860da5048 100644
--- a/manifests/crd.yaml
+++ b/manifests/crd.yaml
@@ -10302,6 +10302,43 @@ spec:
type: array
master:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -10462,6 +10499,43 @@ spec:
type: object
worker:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failoverUID:
type: string
failureMembers:
@@ -29436,6 +29510,43 @@ spec:
type: array
pd:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -29622,6 +29733,43 @@ spec:
type: object
pump:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
members:
items:
properties:
@@ -29732,6 +29880,43 @@ spec:
type: string
type: object
type: object
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
phase:
type: string
statefulSet:
@@ -29814,6 +29999,43 @@ spec:
type: object
tidb:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -29930,6 +30152,43 @@ spec:
type: object
tiflash:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failoverUID:
type: string
failureStores:
@@ -30109,6 +30368,43 @@ spec:
properties:
bootStrapped:
type: boolean
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
evictLeader:
additionalProperties:
properties:
diff --git a/manifests/crd/v1/pingcap.com_dmclusters.yaml b/manifests/crd/v1/pingcap.com_dmclusters.yaml
index 88d9b3d7ea6..9b53f427176 100644
--- a/manifests/crd/v1/pingcap.com_dmclusters.yaml
+++ b/manifests/crd/v1/pingcap.com_dmclusters.yaml
@@ -7508,6 +7508,43 @@ spec:
type: array
master:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -7668,6 +7705,43 @@ spec:
type: object
worker:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failoverUID:
type: string
failureMembers:
diff --git a/manifests/crd/v1/pingcap.com_tidbclusters.yaml b/manifests/crd/v1/pingcap.com_tidbclusters.yaml
index 66bc88634f1..85babd9c959 100644
--- a/manifests/crd/v1/pingcap.com_tidbclusters.yaml
+++ b/manifests/crd/v1/pingcap.com_tidbclusters.yaml
@@ -17237,6 +17237,43 @@ spec:
type: array
pd:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -17423,6 +17460,43 @@ spec:
type: object
pump:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
members:
items:
properties:
@@ -17533,6 +17607,43 @@ spec:
type: string
type: object
type: object
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
phase:
type: string
statefulSet:
@@ -17615,6 +17726,43 @@ spec:
type: object
tidb:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -17731,6 +17879,43 @@ spec:
type: object
tiflash:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failoverUID:
type: string
failureStores:
@@ -17910,6 +18095,43 @@ spec:
properties:
bootStrapped:
type: boolean
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
evictLeader:
additionalProperties:
properties:
diff --git a/manifests/crd/v1beta1/pingcap.com_dmclusters.yaml b/manifests/crd/v1beta1/pingcap.com_dmclusters.yaml
index cbd430724de..00b5882c154 100644
--- a/manifests/crd/v1beta1/pingcap.com_dmclusters.yaml
+++ b/manifests/crd/v1beta1/pingcap.com_dmclusters.yaml
@@ -7498,6 +7498,43 @@ spec:
type: array
master:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -7658,6 +7695,43 @@ spec:
type: object
worker:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failoverUID:
type: string
failureMembers:
diff --git a/manifests/crd/v1beta1/pingcap.com_tidbclusters.yaml b/manifests/crd/v1beta1/pingcap.com_tidbclusters.yaml
index 346a1269c75..e2794923d12 100644
--- a/manifests/crd/v1beta1/pingcap.com_tidbclusters.yaml
+++ b/manifests/crd/v1beta1/pingcap.com_tidbclusters.yaml
@@ -17214,6 +17214,43 @@ spec:
type: array
pd:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -17400,6 +17437,43 @@ spec:
type: object
pump:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
members:
items:
properties:
@@ -17510,6 +17584,43 @@ spec:
type: string
type: object
type: object
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
phase:
type: string
statefulSet:
@@ -17592,6 +17703,43 @@ spec:
type: object
tidb:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -17708,6 +17856,43 @@ spec:
type: object
tiflash:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failoverUID:
type: string
failureStores:
@@ -17887,6 +18072,43 @@ spec:
properties:
bootStrapped:
type: boolean
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
evictLeader:
additionalProperties:
properties:
diff --git a/manifests/crd_v1beta1.yaml b/manifests/crd_v1beta1.yaml
index 585a1621fd4..66643f913a6 100644
--- a/manifests/crd_v1beta1.yaml
+++ b/manifests/crd_v1beta1.yaml
@@ -10296,6 +10296,43 @@ spec:
type: array
master:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -10456,6 +10493,43 @@ spec:
type: object
worker:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failoverUID:
type: string
failureMembers:
@@ -29413,6 +29487,43 @@ spec:
type: array
pd:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -29599,6 +29710,43 @@ spec:
type: object
pump:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
members:
items:
properties:
@@ -29709,6 +29857,43 @@ spec:
type: string
type: object
type: object
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
phase:
type: string
statefulSet:
@@ -29791,6 +29976,43 @@ spec:
type: object
tidb:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failureMembers:
additionalProperties:
properties:
@@ -29907,6 +30129,43 @@ spec:
type: object
tiflash:
properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
failoverUID:
type: string
failureStores:
@@ -30086,6 +30345,43 @@ spec:
properties:
bootStrapped:
type: boolean
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ nullable: true
+ type: array
evictLeader:
additionalProperties:
properties:
diff --git a/pkg/apis/pingcap/v1alpha1/helpers.go b/pkg/apis/pingcap/v1alpha1/helpers.go
index aa0b6c86bb3..3c13fd7d48e 100644
--- a/pkg/apis/pingcap/v1alpha1/helpers.go
+++ b/pkg/apis/pingcap/v1alpha1/helpers.go
@@ -17,6 +17,8 @@ import (
"fmt"
"hash/fnv"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
)
@@ -105,3 +107,328 @@ func GetStorageVolumeName(storageVolumeName string, memberType MemberType) Stora
func GetStorageVolumeNameForTiFlash(index int) StorageVolumeName {
return StorageVolumeName(fmt.Sprintf("data%d", index))
}
+
+var (
+ _ ComponentStatus = &PDStatus{}
+ _ ComponentStatus = &TiKVStatus{}
+ _ ComponentStatus = &TiDBStatus{}
+ _ ComponentStatus = &PumpStatus{}
+ _ ComponentStatus = &TiFlashStatus{}
+ _ ComponentStatus = &TiCDCStatus{}
+ _ ComponentStatus = &MasterStatus{}
+ _ ComponentStatus = &WorkerStatus{}
+)
+
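+// ComponentStatus is a uniform read/write view over the status of a single
+// cluster component. Illustrative usage (a sketch, not part of this change):
+//
+//	for _, comp := range ComponentStatusFromTC(tc) {
+//		comp.SetCondition(metav1.Condition{
+//			Type:   ComponentVolumeResizing,
+//			Status: metav1.ConditionTrue,
+//			Reason: "BeginResizing",
+//		})
+//	}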
+type ComponentStatus interface {
+ GetMemberType() MemberType
+ // GetSynced returns `status.synced`
+ //
+ // For tidb and pump, it is always true.
+ GetSynced() bool
+ // GetPhase returns `status.phase`
+ GetPhase() MemberPhase
+ // GetVolumes returns `status.volumes`
+ //
+ // NOTE: modifying the returned map modifies the status.
+ GetVolumes() map[StorageVolumeName]*StorageVolumeStatus
+ // GetConditions returns `status.conditions`
+ //
+ // To modify a condition, use `SetCondition` rather than mutating the returned slice.
+ GetConditions() []metav1.Condition
+
+ // SetCondition sets the condition of the given type in conditions to newCondition:
+ // 1. if a condition of that type already exists, all of its fields are updated to newCondition,
+ //    and LastTransitionTime is set to now if the status changed;
+ // 2. otherwise, newCondition is appended, with LastTransitionTime set to now() if unset.
+ SetCondition(condition metav1.Condition)
+ // RemoveCondition removes the condition of the given conditionType from conditions.
+ RemoveCondition(conditionType string)
+}
+
+func ComponentStatusFromTC(tc *TidbCluster) []ComponentStatus {
+ components := []ComponentStatus{}
+ if tc.Spec.PD != nil {
+ components = append(components, &tc.Status.PD)
+ }
+ if tc.Spec.TiDB != nil {
+ components = append(components, &tc.Status.TiDB)
+ }
+ if tc.Spec.TiKV != nil {
+ components = append(components, &tc.Status.TiKV)
+ }
+ if tc.Spec.TiFlash != nil {
+ components = append(components, &tc.Status.TiFlash)
+ }
+ if tc.Spec.TiCDC != nil {
+ components = append(components, &tc.Status.TiCDC)
+ }
+ if tc.Spec.Pump != nil {
+ components = append(components, &tc.Status.Pump)
+ }
+ return components
+}
+
+func ComponentStatusFromDC(dc *DMCluster) []ComponentStatus {
+ components := []ComponentStatus{}
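+ // dm-master is a mandatory component of a DMCluster, so its status is always included.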
+ components = append(components, &dc.Status.Master)
+ if dc.Spec.Worker != nil {
+ components = append(components, &dc.Status.Worker)
+ }
+ return components
+}
+
+func (s *PDStatus) GetMemberType() MemberType {
+ return PDMemberType
+}
+func (s *PDStatus) GetSynced() bool {
+ return s.Synced
+}
+func (s *PDStatus) GetPhase() MemberPhase {
+ return s.Phase
+}
+func (s *PDStatus) GetVolumes() map[StorageVolumeName]*StorageVolumeStatus {
+ return s.Volumes
+}
+func (s *PDStatus) GetConditions() []metav1.Condition {
+ return s.Conditions
+}
+func (s *PDStatus) SetCondition(newCondition metav1.Condition) {
+ if s.Conditions == nil {
+ s.Conditions = []metav1.Condition{}
+ }
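+ // meta.SetStatusCondition mutates the slice through a pointer, so operate on a local copy and write it back.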
+ conditions := s.Conditions
+ meta.SetStatusCondition(&conditions, newCondition)
+ s.Conditions = conditions
+}
+func (s *PDStatus) RemoveCondition(conditionType string) {
+ if s.Conditions == nil {
+ return
+ }
+ conditions := s.Conditions
+ meta.RemoveStatusCondition(&conditions, conditionType)
+ s.Conditions = conditions
+}
+
+func (s *TiKVStatus) GetMemberType() MemberType {
+ return TiKVMemberType
+}
+func (s *TiKVStatus) GetSynced() bool {
+ return s.Synced
+}
+func (s *TiKVStatus) GetPhase() MemberPhase {
+ return s.Phase
+}
+func (s *TiKVStatus) GetVolumes() map[StorageVolumeName]*StorageVolumeStatus {
+ return s.Volumes
+}
+func (s *TiKVStatus) GetConditions() []metav1.Condition {
+ return s.Conditions
+}
+func (s *TiKVStatus) SetCondition(newCondition metav1.Condition) {
+ if s.Conditions == nil {
+ s.Conditions = []metav1.Condition{}
+ }
+ conditions := s.Conditions
+ meta.SetStatusCondition(&conditions, newCondition)
+ s.Conditions = conditions
+}
+func (s *TiKVStatus) RemoveCondition(conditionType string) {
+ if s.Conditions == nil {
+ return
+ }
+ conditions := s.Conditions
+ meta.RemoveStatusCondition(&conditions, conditionType)
+ s.Conditions = conditions
+}
+
+func (s *TiDBStatus) GetMemberType() MemberType {
+ return TiDBMemberType
+}
+func (s *TiDBStatus) GetSynced() bool {
+ return true
+}
+func (s *TiDBStatus) GetPhase() MemberPhase {
+ return s.Phase
+}
+func (s *TiDBStatus) GetVolumes() map[StorageVolumeName]*StorageVolumeStatus {
+ return s.Volumes
+}
+func (s *TiDBStatus) GetConditions() []metav1.Condition {
+ return s.Conditions
+}
+func (s *TiDBStatus) SetCondition(newCondition metav1.Condition) {
+ if s.Conditions == nil {
+ s.Conditions = []metav1.Condition{}
+ }
+ conditions := s.Conditions
+ meta.SetStatusCondition(&conditions, newCondition)
+ s.Conditions = conditions
+}
+func (s *TiDBStatus) RemoveCondition(conditionType string) {
+ if s.Conditions == nil {
+ return
+ }
+ conditions := s.Conditions
+ meta.RemoveStatusCondition(&conditions, conditionType)
+ s.Conditions = conditions
+}
+
+func (s *PumpStatus) GetMemberType() MemberType {
+ return PumpMemberType
+}
+func (s *PumpStatus) GetSynced() bool {
+ return true
+}
+func (s *PumpStatus) GetPhase() MemberPhase {
+ return s.Phase
+}
+func (s *PumpStatus) GetVolumes() map[StorageVolumeName]*StorageVolumeStatus {
+ return s.Volumes
+}
+func (s *PumpStatus) GetConditions() []metav1.Condition {
+ return s.Conditions
+}
+func (s *PumpStatus) SetCondition(newCondition metav1.Condition) {
+ if s.Conditions == nil {
+ s.Conditions = []metav1.Condition{}
+ }
+ conditions := s.Conditions
+ meta.SetStatusCondition(&conditions, newCondition)
+ s.Conditions = conditions
+}
+func (s *PumpStatus) RemoveCondition(conditionType string) {
+ if s.Conditions == nil {
+ return
+ }
+ conditions := s.Conditions
+ meta.RemoveStatusCondition(&conditions, conditionType)
+ s.Conditions = conditions
+}
+
+func (s *TiFlashStatus) GetMemberType() MemberType {
+ return TiFlashMemberType
+}
+func (s *TiFlashStatus) GetSynced() bool {
+ return s.Synced
+}
+func (s *TiFlashStatus) GetPhase() MemberPhase {
+ return s.Phase
+}
+func (s *TiFlashStatus) GetVolumes() map[StorageVolumeName]*StorageVolumeStatus {
+ return s.Volumes
+}
+func (s *TiFlashStatus) GetConditions() []metav1.Condition {
+ return s.Conditions
+}
+func (s *TiFlashStatus) SetCondition(newCondition metav1.Condition) {
+ if s.Conditions == nil {
+ s.Conditions = []metav1.Condition{}
+ }
+ conditions := s.Conditions
+ meta.SetStatusCondition(&conditions, newCondition)
+ s.Conditions = conditions
+}
+func (s *TiFlashStatus) RemoveCondition(conditionType string) {
+ if s.Conditions == nil {
+ return
+ }
+ conditions := s.Conditions
+ meta.RemoveStatusCondition(&conditions, conditionType)
+ s.Conditions = conditions
+}
+
+func (s *TiCDCStatus) GetMemberType() MemberType {
+ return TiCDCMemberType
+}
+func (s *TiCDCStatus) GetSynced() bool {
+ return s.Synced
+}
+func (s *TiCDCStatus) GetPhase() MemberPhase {
+ return s.Phase
+}
+func (s *TiCDCStatus) GetVolumes() map[StorageVolumeName]*StorageVolumeStatus {
+ return s.Volumes
+}
+func (s *TiCDCStatus) GetConditions() []metav1.Condition {
+ return s.Conditions
+}
+func (s *TiCDCStatus) SetCondition(newCondition metav1.Condition) {
+ if s.Conditions == nil {
+ s.Conditions = []metav1.Condition{}
+ }
+ conditions := s.Conditions
+ meta.SetStatusCondition(&conditions, newCondition)
+ s.Conditions = conditions
+}
+func (s *TiCDCStatus) RemoveCondition(conditionType string) {
+ if s.Conditions == nil {
+ return
+ }
+ conditions := s.Conditions
+ meta.RemoveStatusCondition(&conditions, conditionType)
+ s.Conditions = conditions
+}
+
+func (s *MasterStatus) GetMemberType() MemberType {
+ return DMMasterMemberType
+}
+func (s *MasterStatus) GetSynced() bool {
+ return s.Synced
+}
+func (s *MasterStatus) GetPhase() MemberPhase {
+ return s.Phase
+}
+func (s *MasterStatus) GetVolumes() map[StorageVolumeName]*StorageVolumeStatus {
+ return s.Volumes
+}
+func (s *MasterStatus) GetConditions() []metav1.Condition {
+ return s.Conditions
+}
+func (s *MasterStatus) SetCondition(newCondition metav1.Condition) {
+ if s.Conditions == nil {
+ s.Conditions = []metav1.Condition{}
+ }
+ conditions := s.Conditions
+ meta.SetStatusCondition(&conditions, newCondition)
+ s.Conditions = conditions
+}
+func (s *MasterStatus) RemoveCondition(conditionType string) {
+ if s.Conditions == nil {
+ return
+ }
+ conditions := s.Conditions
+ meta.RemoveStatusCondition(&conditions, conditionType)
+ s.Conditions = conditions
+}
+
+func (s *WorkerStatus) GetMemberType() MemberType {
+ return DMWorkerMemberType
+}
+func (s *WorkerStatus) GetSynced() bool {
+ return s.Synced
+}
+func (s *WorkerStatus) GetPhase() MemberPhase {
+ return s.Phase
+}
+func (s *WorkerStatus) GetVolumes() map[StorageVolumeName]*StorageVolumeStatus {
+ return s.Volumes
+}
+func (s *WorkerStatus) GetConditions() []metav1.Condition {
+ return s.Conditions
+}
+func (s *WorkerStatus) SetCondition(newCondition metav1.Condition) {
+ if s.Conditions == nil {
+ s.Conditions = []metav1.Condition{}
+ }
+ conditions := s.Conditions
+ meta.SetStatusCondition(&conditions, newCondition)
+ s.Conditions = conditions
+}
+func (s *WorkerStatus) RemoveCondition(conditionType string) {
+ if s.Conditions == nil {
+ return
+ }
+ conditions := s.Conditions
+ meta.RemoveStatusCondition(&conditions, conditionType)
+ s.Conditions = conditions
+}
diff --git a/pkg/apis/pingcap/v1alpha1/helpers_test.go b/pkg/apis/pingcap/v1alpha1/helpers_test.go
new file mode 100644
index 00000000000..6b6ff980451
--- /dev/null
+++ b/pkg/apis/pingcap/v1alpha1/helpers_test.go
@@ -0,0 +1,118 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ . "github.com/onsi/gomega"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestComponentStatus(t *testing.T) {
+ tc := &TidbCluster{
+ Spec: TidbClusterSpec{
+ PD: &PDSpec{},
+ TiDB: &TiDBSpec{},
+ TiKV: &TiKVSpec{},
+ TiFlash: &TiFlashSpec{},
+ Pump: &PumpSpec{},
+ TiCDC: &TiCDCSpec{},
+ },
+ }
+ dc := &DMCluster{
+ Spec: DMClusterSpec{
+ Master: MasterSpec{},
+ Worker: &WorkerSpec{},
+ },
+ }
+
+ t.Run("MemberType", func(t *testing.T) {
+ g := NewGomegaWithT(t)
+ components := ComponentStatusFromTC(tc.DeepCopy())
+ components = append(components, ComponentStatusFromDC(dc.DeepCopy())...)
+ for _, status := range components {
+ switch status.GetMemberType() {
+ case PDMemberType:
+ _, ok := status.(*PDStatus)
+ g.Expect(ok).To(BeTrue())
+ case TiDBMemberType:
+ _, ok := status.(*TiDBStatus)
+ g.Expect(ok).To(BeTrue())
+ case TiKVMemberType:
+ _, ok := status.(*TiKVStatus)
+ g.Expect(ok).To(BeTrue())
+ case TiFlashMemberType:
+ _, ok := status.(*TiFlashStatus)
+ g.Expect(ok).To(BeTrue())
+ case TiCDCMemberType:
+ _, ok := status.(*TiCDCStatus)
+ g.Expect(ok).To(BeTrue())
+ case PumpMemberType:
+ _, ok := status.(*PumpStatus)
+ g.Expect(ok).To(BeTrue())
+ case DMMasterMemberType:
+ _, ok := status.(*MasterStatus)
+ g.Expect(ok).To(BeTrue())
+ case DMWorkerMemberType:
+ _, ok := status.(*WorkerStatus)
+ g.Expect(ok).To(BeTrue())
+ }
+ }
+ })
+
+ t.Run("Conditions", func(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ components := ComponentStatusFromTC(tc.DeepCopy())
+ components = append(components, ComponentStatusFromDC(dc.DeepCopy())...)
+ for _, status := range components {
+ conds := status.GetConditions()
+ g.Expect(conds).To(BeNil())
+
+ // test to add a condition
+ condInput := metav1.Condition{
+ Type: "Test",
+ Status: metav1.ConditionTrue,
+ Reason: "Test True Reason",
+ Message: "Test True Message",
+ }
+ status.SetCondition(condInput)
+ conds = status.GetConditions()
+ condOutput := meta.FindStatusCondition(conds, condInput.Type)
+ condOutput.LastTransitionTime = condInput.LastTransitionTime // ignore the last transition time
+ g.Expect(cmp.Diff(*condOutput, condInput)).To(BeEmpty())
+
+ // test to update a condition
+ condInput.Status = metav1.ConditionFalse
+ condInput.Reason = "Test False Reason"
+ condInput.Message = "Test False Message"
+ status.SetCondition(condInput)
+ conds = status.GetConditions()
+ condOutput = meta.FindStatusCondition(conds, condInput.Type)
+ condOutput.LastTransitionTime = condInput.LastTransitionTime // ignore the last transition time
+ g.Expect(cmp.Diff(*condOutput, condInput)).To(BeEmpty())
+
+ // test to remove a condition
+ status.RemoveCondition(condInput.Type)
+ conds = status.GetConditions()
+ g.Expect(conds).To(BeEmpty())
+ condOutput = meta.FindStatusCondition(conds, condInput.Type)
+ g.Expect(condOutput).To(BeNil())
+ }
+ })
+}
diff --git a/pkg/apis/pingcap/v1alpha1/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/tidbcluster.go
index fa585b51477..ebae535670c 100644
--- a/pkg/apis/pingcap/v1alpha1/tidbcluster.go
+++ b/pkg/apis/pingcap/v1alpha1/tidbcluster.go
@@ -21,6 +21,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/apis/label"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
@@ -964,3 +965,15 @@ func (tc *TidbCluster) WithoutLocalTiDB() bool {
func (tc *TidbCluster) AcrossK8s() bool {
return tc.Spec.AcrossK8s
}
+
+// IsComponentVolumeResizing returns true if the ComponentVolumeResizing condition of the given component is true.
+func (tc *TidbCluster) IsComponentVolumeResizing(compType MemberType) bool {
+ comps := ComponentStatusFromTC(tc)
+ for _, comp := range comps {
+ if comp.GetMemberType() == compType {
+ conds := comp.GetConditions()
+ return meta.IsStatusConditionTrue(conds, ComponentVolumeResizing)
+ }
+ }
+ return false
+}
diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go
index e3afd49c780..aee73ad1648 100644
--- a/pkg/apis/pingcap/v1alpha1/types.go
+++ b/pkg/apis/pingcap/v1alpha1/types.go
@@ -376,6 +376,12 @@ const (
TidbClusterReady TidbClusterConditionType = "Ready"
)
+// The `Type` of the component condition
+const (
+ // ComponentVolumeResizing indicates that any volume of this component is resizing.
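+ //
+ // An illustrative rendering in the object status (field values are examples only):
+ //
+ //  conditions:
+ //  - type: ComponentVolumeResizing
+ //    status: "True"
+ //    reason: BeginResizing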
+ ComponentVolumeResizing string = "ComponentVolumeResizing"
+)
+
// +k8s:openapi-gen=true
// DiscoverySpec contains details of Discovery members
type DiscoverySpec struct {
@@ -1130,6 +1136,10 @@ type PDStatus struct {
Image string `json:"image,omitempty"`
// Volumes contains the status of all volumes.
Volumes map[StorageVolumeName]*StorageVolumeStatus `json:"volumes,omitempty"`
+ // Represents the latest available observations of a component's state.
+ // +optional
+ // +nullable
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// PDMember is PD member
@@ -1181,6 +1191,10 @@ type TiDBStatus struct {
PasswordInitialized *bool `json:"passwordInitialized,omitempty"`
// Volumes contains the status of all volumes.
Volumes map[StorageVolumeName]*StorageVolumeStatus `json:"volumes,omitempty"`
+ // Represents the latest available observations of a component's state.
+ // +optional
+ // +nullable
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// TiDBMember is TiDB member
@@ -1201,7 +1215,16 @@ type TiDBFailureMember struct {
CreatedAt metav1.Time `json:"createdAt,omitempty"`
}
-const EvictLeaderAnnKey = "tidb.pingcap.com/evict-leader"
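+// EvictLeaderAnnKeys contains all the annotation keys that can trigger leader eviction.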
+var (
+ EvictLeaderAnnKeys = []string{EvictLeaderAnnKey, EvictLeaderAnnKeyForResize}
+)
+
+const (
+ // EvictLeaderAnnKey is the annotation key used by the user to evict leaders.
+ EvictLeaderAnnKey = "tidb.pingcap.com/evict-leader"
+ // EvictLeaderAnnKeyForResize is the annotation key used by the PVC resizer to evict leaders.
+ EvictLeaderAnnKeyForResize = "tidb.pingcap.com/evict-leader-for-resize"
+)
// The `Value` of annotation controls the behavior when the leader count drops to zero, the valid value is one of:
//
@@ -1232,6 +1255,10 @@ type TiKVStatus struct {
EvictLeader map[string]*EvictLeaderStatus `json:"evictLeader,omitempty"`
// Volumes contains the status of all volumes.
Volumes map[StorageVolumeName]*StorageVolumeStatus `json:"volumes,omitempty"`
+ // Represents the latest available observations of a component's state.
+ // +optional
+ // +nullable
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// TiFlashStatus is TiFlash status
@@ -1247,6 +1274,10 @@ type TiFlashStatus struct {
Image string `json:"image,omitempty"`
// Volumes contains the status of all volumes.
Volumes map[StorageVolumeName]*StorageVolumeStatus `json:"volumes,omitempty"`
+ // Represents the latest available observations of a component's state.
+ // +optional
+ // +nullable
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// TiCDCStatus is TiCDC status
@@ -1257,6 +1288,10 @@ type TiCDCStatus struct {
Captures map[string]TiCDCCapture `json:"captures,omitempty"`
// Volumes contains the status of all volumes.
Volumes map[StorageVolumeName]*StorageVolumeStatus `json:"volumes,omitempty"`
+ // Represents the latest available observations of a component's state.
+ // +optional
+ // +nullable
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// TiCDCCapture is TiCDC Capture status
@@ -1310,6 +1345,10 @@ type PumpStatus struct {
Members []*PumpNodeStatus `json:"members,omitempty"`
// Volumes contains the status of all volumes.
Volumes map[StorageVolumeName]*StorageVolumeStatus `json:"volumes,omitempty"`
+ // Represents the latest available observations of a component's state.
+ // +optional
+ // +nullable
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// TiDBTLSClient can enable TLS connection between TiDB server and MySQL client
@@ -2364,6 +2403,10 @@ type MasterStatus struct {
Image string `json:"image,omitempty"`
// Volumes contains the status of all volumes.
Volumes map[StorageVolumeName]*StorageVolumeStatus `json:"volumes,omitempty"`
+ // Represents the latest available observations of a component's state.
+ // +optional
+ // +nullable
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// MasterMember is dm-master member status
@@ -2400,6 +2443,10 @@ type WorkerStatus struct {
Image string `json:"image,omitempty"`
// Volumes contains the status of all volumes.
Volumes map[StorageVolumeName]*StorageVolumeStatus `json:"volumes,omitempty"`
+ // Represents the latest available observations of a component's state.
+ // +optional
+ // +nullable
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
}
// WorkerMember is dm-worker member status
diff --git a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
index 0604961b6fe..22a8055cd0b 100644
--- a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
@@ -25,6 +25,7 @@ import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
)
@@ -2674,6 +2675,13 @@ func (in *MasterStatus) DeepCopyInto(out *MasterStatus) {
(*out)[key] = outVal
}
}
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -3710,6 +3718,13 @@ func (in *PDStatus) DeepCopyInto(out *PDStatus) {
(*out)[key] = outVal
}
}
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -4320,6 +4335,13 @@ func (in *PumpStatus) DeepCopyInto(out *PumpStatus) {
(*out)[key] = outVal
}
}
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -5222,6 +5244,13 @@ func (in *TiCDCStatus) DeepCopyInto(out *TiCDCStatus) {
(*out)[key] = outVal
}
}
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -5788,6 +5817,13 @@ func (in *TiDBStatus) DeepCopyInto(out *TiDBStatus) {
(*out)[key] = outVal
}
}
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -6015,6 +6051,13 @@ func (in *TiFlashStatus) DeepCopyInto(out *TiFlashStatus) {
(*out)[key] = outVal
}
}
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -7901,6 +7944,13 @@ func (in *TiKVStatus) DeepCopyInto(out *TiKVStatus) {
(*out)[key] = outVal
}
}
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -9422,6 +9472,13 @@ func (in *WorkerStatus) DeepCopyInto(out *WorkerStatus) {
(*out)[key] = outVal
}
}
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go
index dfeb3b25016..14d586a9d88 100644
--- a/pkg/controller/controller_utils.go
+++ b/pkg/controller/controller_utils.go
@@ -15,6 +15,7 @@ package controller
import (
"context"
+ stderrs "errors"
"fmt"
"regexp"
@@ -78,8 +79,8 @@ func RequeueErrorf(format string, a ...interface{}) error {
// IsRequeueError returns whether err is a RequeueError
func IsRequeueError(err error) bool {
- _, ok := err.(*RequeueError)
- return ok
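+ // Use errors.As instead of a bare type assertion so that wrapped RequeueErrors are also matched.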
+ rerr := &RequeueError{}
+ return stderrs.As(err, &rerr)
}
// IgnoreError is used to ignore this item, this error type should't be considered as a real error, no need to requeue
diff --git a/pkg/controller/tidbcluster/pod_control.go b/pkg/controller/tidbcluster/pod_control.go
index 288ea192614..99e0a647505 100644
--- a/pkg/controller/tidbcluster/pod_control.go
+++ b/pkg/controller/tidbcluster/pod_control.go
@@ -214,14 +214,14 @@ func (c *PodController) getPDClient(tc *v1alpha1.TidbCluster) pdapi.PDClient {
}
func (c *PodController) syncTiKVPod(ctx context.Context, pod *corev1.Pod, tc *v1alpha1.TidbCluster) (reconcile.Result, error) {
- value, ok := pod.Annotations[v1alpha1.EvictLeaderAnnKey]
+ key, value, ok := needEvictLeader(pod)
if ok {
switch value {
case v1alpha1.EvictLeaderValueNone:
case v1alpha1.EvictLeaderValueDeletePod:
default:
- klog.Warningf("Ignore unknown value %q of annotation %q for Pod %s/%s", value, v1alpha1.EvictLeaderAnnKey, pod.Namespace, pod.Name)
+ klog.Warningf("Ignore unknown value %q of annotation %q for Pod %s/%s", value, key, pod.Namespace, pod.Name)
return reconcile.Result{}, nil
}
}
@@ -340,3 +340,14 @@ func (c *PodController) syncTiKVPod(ctx context.Context, pod *corev1.Pod, tc *v1
return reconcile.Result{}, nil
}
+
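+// needEvictLeader returns the first evict-leader annotation present on the Pod,
+// together with the annotation key that matched and its value.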
+func needEvictLeader(pod *corev1.Pod) (string, string, bool) {
+ for _, key := range v1alpha1.EvictLeaderAnnKeys {
+ value, exist := pod.Annotations[key]
+ if exist {
+ return key, value, true
+ }
+ }
+
+ return "", "", false
+}
diff --git a/pkg/controller/tidbcluster/pod_control_test.go b/pkg/controller/tidbcluster/pod_control_test.go
index a0767c35965..a33c9dd39f9 100644
--- a/pkg/controller/tidbcluster/pod_control_test.go
+++ b/pkg/controller/tidbcluster/pod_control_test.go
@@ -126,6 +126,28 @@ func TestPodControllerSync(t *testing.T) {
}, timeout, interval).ShouldNot(Equal(0), "should finish annotation")
}
+func TestNeedEvictLeader(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ pod := &corev1.Pod{}
+ pod.Annotations = map[string]string{}
+
+ // no evict-leader annotation is set
+ _, _, exist := needEvictLeader(pod.DeepCopy())
+ g.Expect(exist).To(BeFalse())
+
+ // each supported key is recognized
+ for _, key := range v1alpha1.EvictLeaderAnnKeys {
+ cur := pod.DeepCopy()
+ cur.Annotations[key] = v1alpha1.EvictLeaderValueDeletePod
+ usedkey, val, exist := needEvictLeader(cur)
+ g.Expect(exist).To(BeTrue())
+ g.Expect(key).To(Equal(usedkey))
+ g.Expect(val).To(Equal(cur.Annotations[key]))
+ }
+}
+
func newTiKVPod(tc *v1alpha1.TidbCluster) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/manager/member/pvc_resizer.go b/pkg/manager/member/pvc_resizer.go
index d957c98adaa..68949a91217 100644
--- a/pkg/manager/member/pvc_resizer.go
+++ b/pkg/manager/member/pvc_resizer.go
@@ -25,12 +25,15 @@ import (
"github.com/pingcap/tidb-operator/pkg/util"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
errutil "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/client-go/kubernetes"
+ storagelister "k8s.io/client-go/listers/storage/v1"
"k8s.io/klog/v2"
)
@@ -45,7 +48,7 @@ import (
// if storageClass does not support VolumeExpansion, skip and continue
// if not patched, patch
//
-// We patch all PVCs at the same time. For many cloud storage plugins (e.g.
+// We patch all PVCs of one Pod at the same time. For many cloud storage plugins (e.g.
// AWS-EBS, GCE-PD), they support online file system expansion in latest
// Kubernetes (1.15+).
//
@@ -78,19 +81,30 @@ var (
dmWorkerRequirement = util.MustNewRequirement(label.ComponentLabelKey, selection.Equals, []string{label.DMWorkerLabelVal})
)
-type pvcResizer struct {
- deps *controller.Dependencies
+type volumePhase string
+
+const (
+ // needResize means the storage request of PVC is different from the storage request in TC/DC.
+ needResize volumePhase = "NeedResize"
+ // resizing means the storage request of PVC is equal to the storage request in TC/DC and PVC is resizing.
+ resizing volumePhase = "Resizing"
+ // resized means the storage request of PVC is equal to the storage request in TC/DC and PVC has been resized.
+ resized volumePhase = "Resized"
+)
+
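+// volume pairs a logical storage volume name with its backing PVC.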
+type volume struct {
+ name v1alpha1.StorageVolumeName
+ pvc *corev1.PersistentVolumeClaim
}
type podVolumeContext struct {
- pod *corev1.Pod
- volToPVCs map[v1alpha1.StorageVolumeName]*corev1.PersistentVolumeClaim
+ pod *corev1.Pod
+ volumes []*volume
}
type componentVolumeContext struct {
- comp v1alpha1.MemberType
- namespace string
- name string
+ cluster metav1.Object
+ status v1alpha1.ComponentStatus
// label selector for pvc and pod
selector labels.Selector
@@ -98,40 +112,30 @@ type componentVolumeContext struct {
desiredVolumeQuantity map[v1alpha1.StorageVolumeName]resource.Quantity
// actualPodVolumes is the actual status for all volumes
actualPodVolumes []*podVolumeContext
+}
- // sourceVolumeStatus is the volume status in tc status
- // NOTE: modifying it will modify the status in tc
- sourceVolumeStatus map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus
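+// ComponentID identifies the component as "namespace/name:memberType" in logs and error messages.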
+func (c *componentVolumeContext) ComponentID() string {
+ return fmt.Sprintf("%s/%s:%s", c.cluster.GetNamespace(), c.cluster.GetName(), c.status.GetMemberType())
}
-func (p *pvcResizer) Sync(tc *v1alpha1.TidbCluster) error {
- id := fmt.Sprintf("%s/%s", tc.Namespace, tc.Name)
+type pvcResizer struct {
+ deps *controller.Dependencies
+}
- components := []v1alpha1.MemberType{}
- if tc.Spec.PD != nil {
- components = append(components, v1alpha1.PDMemberType)
- }
- if tc.Spec.TiDB != nil {
- components = append(components, v1alpha1.TiDBMemberType)
- }
- if tc.Spec.TiKV != nil {
- components = append(components, v1alpha1.TiKVMemberType)
- }
- if tc.Spec.TiFlash != nil {
- components = append(components, v1alpha1.TiFlashMemberType)
- }
- if tc.Spec.TiCDC != nil {
- components = append(components, v1alpha1.TiCDCMemberType)
- }
- if tc.Spec.Pump != nil {
- components = append(components, v1alpha1.PumpMemberType)
+func NewPVCResizer(deps *controller.Dependencies) PVCResizerInterface {
+ return &pvcResizer{
+ deps: deps,
}
+}
+func (p *pvcResizer) Sync(tc *v1alpha1.TidbCluster) error {
+ components := v1alpha1.ComponentStatusFromTC(tc)
errs := []error{}
+
for _, comp := range components {
ctx, err := p.buildContextForTC(tc, comp)
if err != nil {
- errs = append(errs, fmt.Errorf("sync pvc for %q in tc %q failed: failed to prepare: %v", comp, id, err))
+ errs = append(errs, fmt.Errorf("build ctx used by resize for %s failed: %w", ctx.ComponentID(), err))
continue
}
@@ -139,7 +143,7 @@ func (p *pvcResizer) Sync(tc *v1alpha1.TidbCluster) error {
err = p.resizeVolumes(ctx)
if err != nil {
- errs = append(errs, fmt.Errorf("sync pvc for %q in tc %q failed: resize volumes failed: %v", comp, id, err))
+ errs = append(errs, fmt.Errorf("resize volumes for %s failed: %w", ctx.ComponentID(), err))
continue
}
}
@@ -148,19 +152,13 @@ func (p *pvcResizer) Sync(tc *v1alpha1.TidbCluster) error {
}
func (p *pvcResizer) SyncDM(dc *v1alpha1.DMCluster) error {
- id := fmt.Sprintf("%s/%s", dc.Namespace, dc.Name)
-
- components := []v1alpha1.MemberType{}
- components = append(components, v1alpha1.DMMasterMemberType)
- if dc.Spec.Worker != nil {
- components = append(components, v1alpha1.DMWorkerMemberType)
- }
-
+ components := v1alpha1.ComponentStatusFromDC(dc)
errs := []error{}
+
for _, comp := range components {
ctx, err := p.buildContextForDM(dc, comp)
if err != nil {
- errs = append(errs, fmt.Errorf("sync pvc for %q in dc %q failed: failed to prepare: %v", comp, id, err))
+ errs = append(errs, fmt.Errorf("build ctx used by resize for %s failed: %w", ctx.ComponentID(), err))
continue
}
@@ -168,7 +166,7 @@ func (p *pvcResizer) SyncDM(dc *v1alpha1.DMCluster) error {
err = p.resizeVolumes(ctx)
if err != nil {
- errs = append(errs, fmt.Errorf("sync pvc for %q in dc %q failed: resize volumes failed: %v", comp, id, err))
+ errs = append(errs, fmt.Errorf("resize volumes for %s failed: %w", ctx.ComponentID(), err))
continue
}
}
@@ -176,14 +174,12 @@ func (p *pvcResizer) SyncDM(dc *v1alpha1.DMCluster) error {
return errutil.NewAggregate(errs)
}
-func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, comp v1alpha1.MemberType) (*componentVolumeContext, error) {
- ns := tc.Namespace
- name := tc.Name
+func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, status v1alpha1.ComponentStatus) (*componentVolumeContext, error) {
+ comp := status.GetMemberType()
ctx := &componentVolumeContext{
- comp: comp,
- namespace: ns,
- name: name,
+ cluster: tc,
+ status: status,
desiredVolumeQuantity: map[v1alpha1.StorageVolumeName]resource.Quantity{},
}
@@ -198,7 +194,6 @@ func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, comp v1alpha1.M
if tc.Status.PD.Volumes == nil {
tc.Status.PD.Volumes = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
}
- ctx.sourceVolumeStatus = tc.Status.PD.Volumes
if quantity, ok := tc.Spec.PD.Requests[corev1.ResourceStorage]; ok {
ctx.desiredVolumeQuantity[v1alpha1.GetStorageVolumeName("", v1alpha1.PDMemberType)] = quantity
}
@@ -208,14 +203,12 @@ func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, comp v1alpha1.M
if tc.Status.TiDB.Volumes == nil {
tc.Status.TiDB.Volumes = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
}
- ctx.sourceVolumeStatus = tc.Status.TiDB.Volumes
storageVolumes = tc.Spec.TiDB.StorageVolumes
case v1alpha1.TiKVMemberType:
ctx.selector = selector.Add(*tikvRequirement)
if tc.Status.TiKV.Volumes == nil {
tc.Status.TiKV.Volumes = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
}
- ctx.sourceVolumeStatus = tc.Status.TiKV.Volumes
if quantity, ok := tc.Spec.TiKV.Requests[corev1.ResourceStorage]; ok {
ctx.desiredVolumeQuantity[v1alpha1.GetStorageVolumeName("", v1alpha1.TiKVMemberType)] = quantity
}
@@ -225,7 +218,6 @@ func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, comp v1alpha1.M
if tc.Status.TiFlash.Volumes == nil {
tc.Status.TiFlash.Volumes = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
}
- ctx.sourceVolumeStatus = tc.Status.TiFlash.Volumes
for i, claim := range tc.Spec.TiFlash.StorageClaims {
if quantity, ok := claim.Resources.Requests[corev1.ResourceStorage]; ok {
ctx.desiredVolumeQuantity[v1alpha1.GetStorageVolumeNameForTiFlash(i)] = quantity
@@ -236,14 +228,12 @@ func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, comp v1alpha1.M
if tc.Status.TiCDC.Volumes == nil {
tc.Status.TiCDC.Volumes = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
}
- ctx.sourceVolumeStatus = tc.Status.TiCDC.Volumes
storageVolumes = tc.Spec.TiCDC.StorageVolumes
case v1alpha1.PumpMemberType:
ctx.selector = selector.Add(*pumpRequirement)
if tc.Status.Pump.Volumes == nil {
tc.Status.Pump.Volumes = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
}
- ctx.sourceVolumeStatus = tc.Status.Pump.Volumes
if quantity, ok := tc.Spec.Pump.Requests[corev1.ResourceStorage]; ok {
ctx.desiredVolumeQuantity[v1alpha1.GetStorageVolumeName("", v1alpha1.PumpMemberType)] = quantity
}
@@ -255,11 +245,11 @@ func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, comp v1alpha1.M
if quantity, err := resource.ParseQuantity(sv.StorageSize); err == nil {
ctx.desiredVolumeQuantity[v1alpha1.GetStorageVolumeName(sv.Name, comp)] = quantity
} else {
- klog.Warningf("StorageVolume %q in %s/%s .spec.%s is invalid", sv.Name, ns, name, comp)
+ klog.Warningf("StorageVolume %q in %s .spec.%s is invalid", sv.Name, ctx.ComponentID(), comp)
}
}
- podVolumes, err := p.collectAcutalStatus(ns, ctx.selector)
+ podVolumes, err := p.collectAcutalStatus(ctx.cluster.GetNamespace(), ctx.selector)
if err != nil {
return nil, err
}
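For reference, the keys of `desiredVolumeQuantity` come from `GetStorageVolumeName`, whose body is not shown in this diff. The PVC names used throughout this PR's tests (e.g. "pd-tc-pd-0" and "pd-log-tc-pd-0") imply the following convention; this is a sketch of the mapping, not the helper's implementation:

```go
// Inferred naming convention: an empty volume name yields the bare component
// name, while a named volume is prefixed with the component.
var (
	dataVol = v1alpha1.GetStorageVolumeName("", v1alpha1.PDMemberType)    // "pd"
	logVol  = v1alpha1.GetStorageVolumeName("log", v1alpha1.PDMemberType) // "pd-log"
)
```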
@@ -268,14 +258,12 @@ func (p *pvcResizer) buildContextForTC(tc *v1alpha1.TidbCluster, comp v1alpha1.M
return ctx, nil
}
-func (p *pvcResizer) buildContextForDM(dc *v1alpha1.DMCluster, comp v1alpha1.MemberType) (*componentVolumeContext, error) {
- ns := dc.Namespace
- name := dc.Name
+func (p *pvcResizer) buildContextForDM(dc *v1alpha1.DMCluster, status v1alpha1.ComponentStatus) (*componentVolumeContext, error) {
+ comp := status.GetMemberType()
ctx := &componentVolumeContext{
- comp: comp,
- namespace: ns,
- name: name,
+ cluster: dc,
+ status: status,
desiredVolumeQuantity: map[v1alpha1.StorageVolumeName]resource.Quantity{},
}
@@ -289,7 +277,6 @@ func (p *pvcResizer) buildContextForDM(dc *v1alpha1.DMCluster, comp v1alpha1.Mem
if dc.Status.Master.Volumes == nil {
dc.Status.Master.Volumes = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
}
- ctx.sourceVolumeStatus = dc.Status.Master.Volumes
if quantity, err := resource.ParseQuantity(dc.Spec.Master.StorageSize); err == nil {
ctx.desiredVolumeQuantity[v1alpha1.GetStorageVolumeName("", v1alpha1.DMMasterMemberType)] = quantity
}
@@ -298,7 +285,6 @@ func (p *pvcResizer) buildContextForDM(dc *v1alpha1.DMCluster, comp v1alpha1.Mem
if dc.Status.Worker.Volumes == nil {
dc.Status.Worker.Volumes = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
}
- ctx.sourceVolumeStatus = dc.Status.Worker.Volumes
if quantity, err := resource.ParseQuantity(dc.Spec.Worker.StorageSize); err == nil {
ctx.desiredVolumeQuantity[v1alpha1.GetStorageVolumeName("", v1alpha1.DMWorkerMemberType)] = quantity
}
@@ -306,7 +292,7 @@ func (p *pvcResizer) buildContextForDM(dc *v1alpha1.DMCluster, comp v1alpha1.Mem
return nil, fmt.Errorf("unsupported member type %s", comp)
}
- podVolumes, err := p.collectAcutalStatus(ns, ctx.selector)
+ podVolumes, err := p.collectAcutalStatus(ctx.cluster.GetNamespace(), ctx.selector)
if err != nil {
return nil, err
}
@@ -317,7 +303,8 @@ func (p *pvcResizer) buildContextForDM(dc *v1alpha1.DMCluster, comp v1alpha1.Mem
// updateVolumeStatus build volume status from `actualPodVolumes` and update `sourceVolumeStatus`.
func (p *pvcResizer) updateVolumeStatus(ctx *componentVolumeContext) {
- if ctx.sourceVolumeStatus == nil {
+ sourceVolumeStatus := ctx.status.GetVolumes()
+ if sourceVolumeStatus == nil {
return
}
@@ -345,8 +332,11 @@ func (p *pvcResizer) updateVolumeStatus(ctx *componentVolumeContext) {
// build observed status from `actualPodVolumes`
observedStatus := map[v1alpha1.StorageVolumeName]*v1alpha1.ObservedStorageVolumeStatus{}
- for _, podVolume := range ctx.actualPodVolumes {
- for volName, pvc := range podVolume.volToPVCs {
+ for _, podVolumes := range ctx.actualPodVolumes {
+ for _, volume := range podVolumes.volumes {
+ volName := volume.name
+ pvc := volume.pvc
+
desiredQuantity, actualQuantity, pred := getCapacity(volName, pvc)
if !pred {
continue
@@ -385,132 +375,291 @@ func (p *pvcResizer) updateVolumeStatus(ctx *componentVolumeContext) {
// sync volume status for `sourceVolumeStatus`
for volName, status := range observedStatus {
- if _, exist := ctx.sourceVolumeStatus[volName]; !exist {
- ctx.sourceVolumeStatus[volName] = &v1alpha1.StorageVolumeStatus{
+ if _, exist := sourceVolumeStatus[volName]; !exist {
+ sourceVolumeStatus[volName] = &v1alpha1.StorageVolumeStatus{
Name: volName,
}
}
- ctx.sourceVolumeStatus[volName].ObservedStorageVolumeStatus = *status
+ sourceVolumeStatus[volName].ObservedStorageVolumeStatus = *status
}
- for _, status := range ctx.sourceVolumeStatus {
+ for _, status := range sourceVolumeStatus {
if _, exist := observedStatus[status.Name]; !exist {
- delete(ctx.sourceVolumeStatus, status.Name)
+ delete(sourceVolumeStatus, status.Name)
}
}
}
// resizeVolumes resize PVCs by comparing `desiredVolumeQuantity` and `actualVolumeQuantity` in context.
func (p *pvcResizer) resizeVolumes(ctx *componentVolumeContext) error {
- desiredVolumeQuantity := ctx.desiredVolumeQuantity
- podVolumes := ctx.actualPodVolumes
+ var (
+ resizingPod *corev1.Pod
+ classifiedVolumes map[volumePhase][]*volume
+ )
+
+	// choose one pod whose volumes need to be resized
+ for _, podVolumes := range ctx.actualPodVolumes {
+ curClassifiedVolumes, err := p.classifyVolumes(ctx, podVolumes.volumes)
+ if err != nil {
+ return fmt.Errorf("classify volumes for %s failed: %w", ctx.ComponentID(), err)
+ }
- id := fmt.Sprintf("%s/%s", ctx.namespace, ctx.name)
+ if len(curClassifiedVolumes[resizing]) != 0 || len(curClassifiedVolumes[needResize]) != 0 {
+ resizingPod = podVolumes.pod
+ classifiedVolumes = curClassifiedVolumes
+ break
+ }
+ }
- for _, podVolume := range podVolumes {
+ allResized := resizingPod == nil
+ condResizing := meta.IsStatusConditionTrue(ctx.status.GetConditions(), v1alpha1.ComponentVolumeResizing)
- allResized := true
- errors := []error{}
+ if allResized {
+ if condResizing {
+ return p.endResize(ctx)
+ }
+ klog.V(4).Infof("all volumes are resized for %s", ctx.ComponentID())
+ return nil
+ }
- for volName, pvc := range podVolume.volToPVCs {
- pvcID := fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name)
+ if !condResizing {
+ return p.beginResize(ctx)
+ }
- // check whether the PVC is resized
- quantityInSpec, exist := desiredVolumeQuantity[volName]
- if !exist {
- klog.Errorf("Check PVC %q of %q resized failed: not exist in desired volumes", pvcID, id)
- continue
- }
- currentRequest, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
- if !ok {
- klog.Errorf("Check PVC %q of %q resized failed: storage request is empty", pvcID, id)
- continue
- }
- currentCapacity, ok := pvc.Status.Capacity[corev1.ResourceStorage]
- if !ok {
- klog.Errorf("Check PVC %q of %q resized failed: storage capacity is empty", pvcID, id)
- continue
- }
+ // some volumes are resizing
+ for _, volume := range classifiedVolumes[resizing] {
+ klog.Infof("PVC %s/%s for %s is resizing", volume.pvc.Namespace, volume.pvc.Name, ctx.ComponentID())
+ }
- cmpVal := quantityInSpec.Cmp(currentRequest)
- resizing := currentRequest.Cmp(currentCapacity) != 0
- if cmpVal == 0 {
- if resizing {
- allResized = false
- klog.Infof("PVC %q of %q is resizing, request: %s capcaity: %s",
- pvcID, id, currentRequest.String(), currentCapacity.String())
- } else {
- klog.V(4).Infof("PVC %q of %q is already resized, request: %s",
- pvcID, id, quantityInSpec.String())
- }
- continue
+ // some volumes need to be resized
+ if len(classifiedVolumes[needResize]) != 0 {
+ klog.V(4).Infof("start to resize volumes of Pod %s/%s for %s", resizingPod.Namespace, resizingPod.Name, ctx.ComponentID())
+ return p.resizeVolumesForPod(ctx, resizingPod, classifiedVolumes[needResize])
+ }
+
+ return nil
+}
+
+func (p *pvcResizer) classifyVolumes(ctx *componentVolumeContext, volumes []*volume) (map[volumePhase][]*volume, error) {
+ desiredVolumeQuantity := ctx.desiredVolumeQuantity
+ cid := ctx.ComponentID()
+
+ needResizeVolumes := []*volume{}
+ resizingVolumes := []*volume{}
+ resizedVolumes := []*volume{}
+
+ for _, volume := range volumes {
+ volName := volume.name
+ pvc := volume.pvc
+ pvcID := fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name)
+
+ // check whether the PVC is resized
+ quantityInSpec, exist := desiredVolumeQuantity[volName]
+ if !exist {
+ klog.Errorf("Check PVC %q of %q resized failed: not exist in desired volumes", pvcID, cid)
+ continue
+ }
+ currentRequest, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
+ if !ok {
+ klog.Errorf("Check PVC %q of %q resized failed: storage request is empty", pvcID, cid)
+ continue
+ }
+ currentCapacity, ok := pvc.Status.Capacity[corev1.ResourceStorage]
+ if !ok {
+ klog.Errorf("Check PVC %q of %q resized failed: storage capacity is empty", pvcID, cid)
+ continue
+ }
+
+ cmpVal := quantityInSpec.Cmp(currentRequest)
+ resizing := currentRequest.Cmp(currentCapacity) != 0
+ if cmpVal == 0 {
+ if resizing {
+ resizingVolumes = append(resizingVolumes, volume)
+ } else {
+ resizedVolumes = append(resizedVolumes, volume)
}
+ continue
+ }
- // check whether the PVC can be resized
+ // check whether the PVC can be resized
- // not support shrink
- if cmpVal < 0 {
- klog.Warningf("Skip to resize PVC %q of %q: storage request cannot be shrunk (%s to %s)",
- pvcID, id, currentRequest.String(), quantityInSpec.String())
- continue
+ // not support shrink
+ if cmpVal < 0 {
+ klog.Warningf("Skip to resize PVC %q of %q: storage request cannot be shrunk (%s to %s)",
+ pvcID, cid, currentRequest.String(), quantityInSpec.String())
+ continue
+ }
+ // not support default storage class
+ if pvc.Spec.StorageClassName == nil {
+ klog.Warningf("Skip to resize PVC %q of %q: PVC have no storage class", pvcID, cid)
+ continue
+ }
+		// check whether the storage class supports volume expansion
+ if p.deps.StorageClassLister != nil {
+ volumeExpansionSupported, err := isVolumeExpansionSupported(p.deps.StorageClassLister, *pvc.Spec.StorageClassName)
+ if err != nil {
+ return nil, err
}
- // not support default storage class
- if pvc.Spec.StorageClassName == nil {
- klog.Warningf("Skip to resize PVC %q of %q: PVC have no storage class", pvcID, id)
+ if !volumeExpansionSupported {
+ klog.Warningf("Skip to resize PVC %q of %q: storage class %q does not support volume expansion",
+ *pvc.Spec.StorageClassName, pvcID, cid)
continue
}
- // check whether the storage class support
- if p.deps.StorageClassLister != nil {
- volumeExpansionSupported, err := p.isVolumeExpansionSupported(*pvc.Spec.StorageClassName)
- if err != nil {
- return err
- }
- if !volumeExpansionSupported {
- klog.Warningf("Skip to resize PVC %q of %q: storage class %q does not support volume expansion",
- *pvc.Spec.StorageClassName, pvcID, id)
- continue
- }
- } else {
- klog.V(4).Infof("Storage classes lister is unavailable, skip checking volume expansion support for PVC %q of %q with storage class %s. This may be caused by no relevant permissions",
- pvcID, id, *pvc.Spec.StorageClassName)
- }
+ } else {
+ klog.V(4).Infof("Storage classes lister is unavailable, skip checking volume expansion support for PVC %q of %q with storage class %s. This may be caused by no relevant permissions",
+ pvcID, cid, *pvc.Spec.StorageClassName)
+ }
+
+ needResizeVolumes = append(needResizeVolumes, volume)
+ }
+
+ return map[volumePhase][]*volume{
+ needResize: needResizeVolumes,
+ resizing: resizingVolumes,
+ resized: resizedVolumes,
+ }, nil
+}
+
+func (p *pvcResizer) resizeVolumesForPod(ctx *componentVolumeContext, pod *corev1.Pod, volumes []*volume) error {
+ if err := p.beforeResizeForPod(ctx, pod, volumes); err != nil {
+ return err
+ }
- allResized = false
+ desiredVolumeQuantity := ctx.desiredVolumeQuantity
+ errs := []error{}
+
+ for _, volume := range volumes {
+ volName := volume.name
+ pvc := volume.pvc
+ pvcID := fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name)
+
+ currentRequest, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
+ if !ok {
+ errs = append(errs, fmt.Errorf("resize PVC %s failed: storage request is empty", pvcID))
+ continue
+ }
+ quantityInSpec, exist := desiredVolumeQuantity[volName]
+ if !exist {
+ errs = append(errs, fmt.Errorf("resize PVC %s failed: not exist in desired volumes", pvcID))
+ continue
+ }
- // patch PVC to expand the storage
- mergePatch, err := json.Marshal(map[string]interface{}{
- "spec": map[string]interface{}{
- "resources": corev1.ResourceRequirements{
- Requests: corev1.ResourceList{
- corev1.ResourceStorage: quantityInSpec,
- },
+ mergePatch, err := json.Marshal(map[string]interface{}{
+ "spec": map[string]interface{}{
+ "resources": corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceStorage: quantityInSpec,
},
},
- })
- if err != nil {
- errors = append(errors, err)
- continue
+ },
+ })
+ if err != nil {
+ errs = append(errs, fmt.Errorf("resize PVC %s failed: %s", pvcID, err))
+ continue
+ }
+ _, err = p.deps.KubeClientset.CoreV1().PersistentVolumeClaims(pvc.Namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, mergePatch, metav1.PatchOptions{})
+ if err != nil {
+ errs = append(errs, fmt.Errorf("resize PVC %s failed: %s", pvcID, err))
+ continue
+ }
+
+ klog.Infof("resize PVC %s of %s: storage request is updated from %s to %s",
+ pvcID, ctx.ComponentID(), currentRequest.String(), quantityInSpec.String())
+ }
+
+ return errutil.NewAggregate(errs)
+}
+
+func (p *pvcResizer) beforeResizeForPod(ctx *componentVolumeContext, resizePod *corev1.Pod, volumes []*volume) error {
+ logPrefix := fmt.Sprintf("before resizing volumes of Pod %s/%s for %q", resizePod.Namespace, resizePod.Name, ctx.ComponentID())
+
+ switch ctx.status.GetMemberType() {
+ case v1alpha1.TiKVMemberType:
+ tc, ok := ctx.cluster.(*v1alpha1.TidbCluster)
+ if !ok {
+ return fmt.Errorf("%s: cluster is not tidb cluster", logPrefix)
+ }
+
+		// remove the leader eviction annotation from already-resized pods and ensure their stores are UP.
+		// the annotation on the last pod is removed in `endResize`.
+ for _, podVolume := range ctx.actualPodVolumes {
+ pod := podVolume.pod
+ if pod.Name == resizePod.Name {
+ break
}
- _, err = p.deps.KubeClientset.CoreV1().PersistentVolumeClaims(pvc.Namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, mergePatch, metav1.PatchOptions{})
+
+ updated, err := updateResizeAnnForTiKVPod(p.deps.KubeClientset, false, pod)
if err != nil {
- errors = append(errors, err)
- continue
+ return err
+ }
+ if updated {
+ klog.Infof("%s: remove leader eviction annotation from pod %s/%s", logPrefix, pod.Namespace, pod.Name)
+ }
+ for _, store := range tc.Status.TiKV.Stores {
+ if store.PodName == pod.Name && store.State != v1alpha1.TiKVStateUp {
+ return fmt.Errorf("%s: store %s of pod %s is not ready", logPrefix, store.ID, store.PodName)
+ }
}
-
- klog.V(2).Infof("PVC %q of %q storage request is updated from %s to %s",
- pvcID, id, currentRequest.String(), quantityInSpec.String())
}
- if len(errors) > 0 {
- return errutil.NewAggregate(errors)
+		// add the leader eviction annotation to the pod and wait for its leader count to drop to 0
+ updated, err := updateResizeAnnForTiKVPod(p.deps.KubeClientset, true, resizePod)
+ if err != nil {
+ return err
+ }
+ if updated {
+ klog.Infof("%s: add leader eviction annotation to pod", logPrefix)
+ }
+ for _, store := range tc.Status.TiKV.Stores {
+ if store.PodName == resizePod.Name {
+ if store.LeaderCount == 0 {
+ klog.V(4).Infof("%s: leader count of store %s become 0", logPrefix, store.ID)
+ return nil
+ } else {
+ return controller.RequeueErrorf("%s: wait for leader count of store %s to be 0", logPrefix, store.ID)
+ }
+ }
}
- if !allResized {
- klog.Infof("Skip to resize other PVCs of %q: wait all PVCs in Pod %s/%s to be resized",
- id, podVolume.pod.Namespace, podVolume.pod.Name)
- break
+ return fmt.Errorf("%s: can't find store in tc", logPrefix)
+ }
+
+ return nil
+}
+
+func (p *pvcResizer) beginResize(ctx *componentVolumeContext) error {
+ ctx.status.SetCondition(metav1.Condition{
+ Type: v1alpha1.ComponentVolumeResizing,
+ Status: metav1.ConditionTrue,
+ Reason: "BeginResizing",
+ Message: "Set resizing condition to begin resizing",
+ })
+ klog.Infof("begin resizing for %s: set resizing condition", ctx.ComponentID())
+ return controller.RequeueErrorf("set condition before resizing volumes for %s", ctx.ComponentID())
+}
+
+func (p *pvcResizer) endResize(ctx *componentVolumeContext) error {
+ switch ctx.status.GetMemberType() {
+ case v1alpha1.TiKVMemberType:
+ // ensure all eviction annotations are removed
+ for _, podVolume := range ctx.actualPodVolumes {
+ updated, err := updateResizeAnnForTiKVPod(p.deps.KubeClientset, false, podVolume.pod)
+ if err != nil {
+ return fmt.Errorf("remove leader eviction annotation from pod failed: %s", err)
+ }
+ if updated {
+ klog.Infof("end resizing for %s: remove leader eviction annotation from pod %s/%s",
+ ctx.ComponentID(), podVolume.pod.Namespace, podVolume.pod.Name)
+ }
}
}
+ ctx.status.SetCondition(metav1.Condition{
+ Type: v1alpha1.ComponentVolumeResizing,
+ Status: metav1.ConditionFalse,
+ Reason: "EndResizing",
+ Message: "All volumes are resized",
+ })
+ klog.Infof("end resizing for %s: update resizing condition", ctx.ComponentID())
return nil
}
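Taken together, `resizeVolumes`, `beginResize`, and `endResize` form a small state machine driven by the `ComponentVolumeResizing` condition. A compact sketch of the per-sync dispatch (the function `nextStep` and its string return values are illustrative only):

```go
package sketch

import (
	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nextStep sketches which step the resizer takes on each sync;
// conds is the component's condition list from its status.
func nextStep(conds []metav1.Condition, allResized bool) string {
	resizing := meta.IsStatusConditionTrue(conds, v1alpha1.ComponentVolumeResizing)
	switch {
	case allResized && resizing:
		return "endResize" // all done: clear eviction annotations, set condition to False
	case allResized:
		return "noop" // nothing to resize and no condition to clear
	case !resizing:
		return "beginResize" // set condition to True first, then requeue
	default:
		return "resizeVolumesForPod" // patch the PVCs of exactly one pod
	}
}
```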
@@ -536,7 +685,7 @@ func (p *pvcResizer) collectAcutalStatus(ns string, selector labels.Selector) ([
}
for _, pod := range pods {
- volToPVCs := map[v1alpha1.StorageVolumeName]*corev1.PersistentVolumeClaim{}
+ volumes := []*volume{}
for _, vol := range pod.Spec.Volumes {
if vol.PersistentVolumeClaim != nil {
@@ -546,26 +695,33 @@ func (p *pvcResizer) collectAcutalStatus(ns string, selector labels.Selector) ([
vol.PersistentVolumeClaim.ClaimName, pod.Namespace, pod.Name)
continue
}
- volToPVCs[v1alpha1.StorageVolumeName(vol.Name)] = pvc
+ volumes = append(volumes, &volume{
+ name: v1alpha1.StorageVolumeName(vol.Name),
+ pvc: pvc.DeepCopy(),
+ })
}
}
result = append(result, &podVolumeContext{
- pod: pod,
- volToPVCs: volToPVCs,
+ pod: pod.DeepCopy(),
+ volumes: volumes,
})
}
// sort by pod name to ensure the order is stable
sort.Slice(result, func(i, j int) bool {
- return result[i].pod.Name < result[j].pod.Name
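+		// compare name lengths first so that e.g. "pd-2" sorts before "pd-10"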
+ name1, name2 := result[i].pod.Name, result[j].pod.Name
+ if len(name1) != len(name2) {
+ return len(name1) < len(name2)
+ }
+ return name1 < name2
})
return result, nil
}
-func (p *pvcResizer) isVolumeExpansionSupported(storageClassName string) (bool, error) {
- sc, err := p.deps.StorageClassLister.Get(storageClassName)
+func isVolumeExpansionSupported(lister storagelister.StorageClassLister, storageClassName string) (bool, error) {
+ sc, err := lister.Get(storageClassName)
if err != nil {
return false, err
}
@@ -575,10 +731,30 @@ func (p *pvcResizer) isVolumeExpansionSupported(storageClassName string) (bool,
return *sc.AllowVolumeExpansion, nil
}
-func NewPVCResizer(deps *controller.Dependencies) PVCResizerInterface {
- return &pvcResizer{
- deps: deps,
+func updateResizeAnnForTiKVPod(client kubernetes.Interface, need bool, pod *corev1.Pod) (bool /*updated*/, error) {
+ _, exist := pod.Annotations[v1alpha1.EvictLeaderAnnKeyForResize]
+
+ if need && !exist {
+ if pod.Annotations == nil {
+ pod.Annotations = map[string]string{}
+ }
+ pod.Annotations[v1alpha1.EvictLeaderAnnKeyForResize] = v1alpha1.EvictLeaderValueNone
+ _, err := client.CoreV1().Pods(pod.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
+ if err != nil {
+ return false, fmt.Errorf("add leader eviction annotation to pod %s/%s failed: %s", pod.Namespace, pod.Name, err)
+ }
+ return true, nil
}
+ if !need && exist {
+ delete(pod.Annotations, v1alpha1.EvictLeaderAnnKeyForResize)
+ _, err := client.CoreV1().Pods(pod.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
+ if err != nil {
+ return false, fmt.Errorf("remove leader eviction annotation from pod %s/%s failed: %s", pod.Namespace, pod.Name, err)
+ }
+ return true, nil
+ }
+
+ return false, nil
}
type fakePVCResizer struct {
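The PVC expansion itself is a plain JSON merge patch against `.spec.resources.requests.storage`. A standalone sketch of the same patch shape the resizer marshals above; `expandPVC` and its parameters are placeholders, not part of this change:

```go
// expandPVC grows a PVC's storage request to the given size with a merge patch.
func expandPVC(client kubernetes.Interface, ns, name string, size resource.Quantity) error {
	mergePatch, err := json.Marshal(map[string]interface{}{
		"spec": map[string]interface{}{
			"resources": corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: size,
				},
			},
		},
	})
	if err != nil {
		return err
	}
	_, err = client.CoreV1().PersistentVolumeClaims(ns).Patch(
		context.TODO(), name, types.MergePatchType, mergePatch, metav1.PatchOptions{})
	return err
}
```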
diff --git a/pkg/manager/member/pvc_resizer_test.go b/pkg/manager/member/pvc_resizer_test.go
index c65575183d9..c8b30772f6b 100644
--- a/pkg/manager/member/pvc_resizer_test.go
+++ b/pkg/manager/member/pvc_resizer_test.go
@@ -15,18 +15,15 @@ package member
import (
"context"
- "fmt"
"testing"
"github.com/google/go-cmp/cmp"
- "github.com/onsi/gomega"
- "github.com/pingcap/tidb-operator/pkg/apis/label"
+ . "github.com/onsi/gomega"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
@@ -55,25 +52,6 @@ func newMockPVC(name, storageClass, storageRequest, capacity string) *v1.Persist
}
}
-func newFullPVC(name, component, storageClass, storageRequest, capacity, nameLabel, instance string) *v1.PersistentVolumeClaim {
- pvc := newMockPVC(name, storageClass, storageRequest, capacity)
- pvc.Labels = map[string]string{
- label.NameLabelKey: nameLabel,
- label.ManagedByLabelKey: label.TiDBOperator,
- label.InstanceLabelKey: instance,
- label.ComponentLabelKey: component,
- }
- return pvc
-}
-
-func newPVCWithStorage(name string, component string, storageClass, storageRequest string) *v1.PersistentVolumeClaim {
- return newFullPVC(name, component, storageClass, storageRequest, storageRequest, "tidb-cluster", "tc")
-}
-
-func newDMPVCWithStorage(name string, component string, storageClass, storageRequest string) *v1.PersistentVolumeClaim {
- return newFullPVC(name, component, storageClass, storageRequest, storageRequest, "dm-cluster", "dc")
-}
-
func newStorageClass(name string, volumeExpansion bool) *storagev1.StorageClass {
return &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
@@ -83,703 +61,309 @@ func newStorageClass(name string, volumeExpansion bool) *storagev1.StorageClass
}
}
-func TestPVCResizer(t *testing.T) {
- tests := []struct {
- name string
- tc *v1alpha1.TidbCluster
- sc *storagev1.StorageClass
- pvcs []*v1.PersistentVolumeClaim
- wantPVCs []*v1.PersistentVolumeClaim
- wantErr error
- expect func(g *gomega.WithT, tc *v1alpha1.TidbCluster)
+func TestResizeVolumes(t *testing.T) {
+ scName := "sc"
+
+ testcases := map[string]struct {
+ setup func(ctx *componentVolumeContext)
+ expect func(g *GomegaWithT, resizer *pvcResizer, ctx *componentVolumeContext, err error)
}{
- {
- name: "no PVCs",
- tc: &v1alpha1.TidbCluster{
- Spec: v1alpha1.TidbClusterSpec{},
- },
- },
- {
- name: "resize PD PVCs",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- PD: &v1alpha1.PDSpec{
- ResourceRequirements: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("2Gi"),
- },
+ "all volumes are resized": {
+ setup: func(ctx *componentVolumeContext) {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.PD
+
+ tc.Status.PD.Conditions = nil
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-0"},
},
- StorageClassName: pointer.StringPtr("sc"),
- Replicas: 3,
- StorageVolumes: []v1alpha1.StorageVolume{
- {
- Name: "log",
- StorageSize: "2Gi",
- },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi")},
},
},
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-tc-pd-0", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-tc-pd-1", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-tc-pd-2", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-log-tc-pd-0", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-log-tc-pd-1", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-log-tc-pd-2", label.PDLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-tc-pd-0", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-tc-pd-1", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-tc-pd-2", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-log-tc-pd-0", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-log-tc-pd-1", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-log-tc-pd-2", label.PDLabelVal, "sc", "1Gi"),
- },
- expect: func(g *gomega.WithT, tc *v1alpha1.TidbCluster) {
- expectStatus := map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "pd": {
- Name: "pd",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 3,
- CurrentCount: 3,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-2"},
},
- },
- "pd-log": {
- Name: "pd-log",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 3,
- CurrentCount: 3,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi")},
},
},
- }
- diff := cmp.Diff(expectStatus, tc.Status.PD.Volumes)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
- },
- },
- {
- name: "resize TiDB PVCs",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- TiDB: &v1alpha1.TiDBSpec{
- StorageVolumes: []v1alpha1.StorageVolume{
- {
- Name: "log",
- StorageSize: "2Gi",
- },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-3"},
},
- Replicas: 3,
- },
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("tidb-log-tc-tidb-0", label.TiDBLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tidb-log-tc-tidb-1", label.TiDBLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tidb-log-tc-tidb-2", label.TiDBLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("tidb-log-tc-tidb-0", label.TiDBLabelVal, "sc", "2Gi"),
- newPVCWithStorage("tidb-log-tc-tidb-1", label.TiDBLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tidb-log-tc-tidb-2", label.TiDBLabelVal, "sc", "1Gi"),
- },
- expect: func(g *gomega.WithT, tc *v1alpha1.TidbCluster) {
- expectStatus := map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "tidb-log": {
- Name: "tidb-log",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 3,
- CurrentCount: 3,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi")},
},
},
}
- diff := cmp.Diff(expectStatus, tc.Status.TiDB.Volumes)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
+ },
+ expect: func(g *WithT, resizer *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).Should(Succeed())
},
},
- {
- name: "resize TiKV PVCs",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- TiKV: &v1alpha1.TiKVSpec{
- ResourceRequirements: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("2Gi"),
- },
+ "end resizing": {
+ setup: func(ctx *componentVolumeContext) {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.PD
+
+ tc.Status.PD.Conditions = []metav1.Condition{
+ {
+ Type: v1alpha1.ComponentVolumeResizing,
+ Status: metav1.ConditionTrue,
+ Reason: "BeginResizing",
+ Message: "Set resizing condition to begin resizing",
+ },
+ }
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-0"},
},
- StorageClassName: pointer.StringPtr("sc"),
- StorageVolumes: []v1alpha1.StorageVolume{
- {
- Name: "log",
- StorageSize: "2Gi",
- },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi")},
},
- Replicas: 3,
},
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("tikv-tc-tikv-0", label.TiKVLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tikv-tc-tikv-1", label.TiKVLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tikv-tc-tikv-2", label.TiKVLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tikv-log-tc-tikv-0", label.TiKVLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tikv-log-tc-tikv-1", label.TiKVLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tikv-log-tc-tikv-2", label.TiKVLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("tikv-tc-tikv-0", label.TiKVLabelVal, "sc", "2Gi"),
- newPVCWithStorage("tikv-tc-tikv-1", label.TiKVLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tikv-tc-tikv-2", label.TiKVLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tikv-log-tc-tikv-0", label.TiKVLabelVal, "sc", "2Gi"),
- newPVCWithStorage("tikv-log-tc-tikv-1", label.TiKVLabelVal, "sc", "1Gi"),
- newPVCWithStorage("tikv-log-tc-tikv-2", label.TiKVLabelVal, "sc", "1Gi"),
- },
- expect: func(g *gomega.WithT, tc *v1alpha1.TidbCluster) {
- expectStatus := map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "tikv": {
- Name: "tikv",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 3,
- CurrentCount: 3,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-2"},
+ },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi")},
},
},
- "tikv-log": {
- Name: "tikv-log",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 3,
- CurrentCount: 3,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-3"},
+ },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi")},
},
},
}
- diff := cmp.Diff(expectStatus, tc.Status.TiKV.Volumes)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
+ },
+ expect: func(g *WithT, resizer *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).Should(Succeed())
+ g.Expect(ctx.cluster.(*v1alpha1.TidbCluster).IsComponentVolumeResizing(ctx.status.GetMemberType())).Should(BeFalse())
},
},
- {
- name: "resize TiFlash PVCs",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- TiFlash: &v1alpha1.TiFlashSpec{
- Replicas: 1,
- StorageClaims: []v1alpha1.StorageClaim{
- {
- Resources: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("2Gi"),
- },
- },
- StorageClassName: pointer.StringPtr("sc"),
- },
- {
- Resources: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("3Gi"),
- },
- },
- StorageClassName: pointer.StringPtr("sc"),
- },
+ "begin resizing": {
+ setup: func(ctx *componentVolumeContext) {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.PD
+
+ tc.Status.PD.Conditions = nil
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-0"},
},
- },
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("data0-tc-tiflash-0", label.TiFlashLabelVal, "sc", "1Gi"),
- newPVCWithStorage("data1-tc-tiflash-0", label.TiFlashLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("data0-tc-tiflash-0", label.TiFlashLabelVal, "sc", "2Gi"),
- newPVCWithStorage("data1-tc-tiflash-0", label.TiFlashLabelVal, "sc", "3Gi"),
- },
- expect: func(g *gomega.WithT, tc *v1alpha1.TidbCluster) {
- expectStatus := map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "data0": {
- Name: "data0",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 1,
- CurrentCount: 1,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "1Gi")},
},
},
- "data1": {
- Name: "data1",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 1,
- CurrentCount: 1,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("3Gi"),
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-2"},
},
- },
- }
- diff := cmp.Diff(expectStatus, tc.Status.TiFlash.Volumes)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
- },
- },
- {
- name: "resize TiCDC PVCs",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- TiCDC: &v1alpha1.TiCDCSpec{
- Replicas: 3,
- StorageVolumes: []v1alpha1.StorageVolume{
- {
- Name: "sort-dir",
- StorageSize: "2Gi",
- },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi")},
},
},
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("ticdc-sort-dir-tc-ticdc-0", label.TiCDCLabelVal, "sc", "1Gi"),
- newPVCWithStorage("ticdc-sort-dir-tc-ticdc-1", label.TiCDCLabelVal, "sc", "1Gi"),
- newPVCWithStorage("ticdc-sort-dir-tc-ticdc-2", label.TiCDCLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("ticdc-sort-dir-tc-ticdc-0", label.TiCDCLabelVal, "sc", "2Gi"),
- newPVCWithStorage("ticdc-sort-dir-tc-ticdc-1", label.TiCDCLabelVal, "sc", "1Gi"),
- newPVCWithStorage("ticdc-sort-dir-tc-ticdc-2", label.TiCDCLabelVal, "sc", "1Gi"),
- },
- expect: func(g *gomega.WithT, tc *v1alpha1.TidbCluster) {
- expectStatus := map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "ticdc-sort-dir": {
- Name: "ticdc-sort-dir",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 3,
- CurrentCount: 3,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-3"},
},
- },
- }
- diff := cmp.Diff(expectStatus, tc.Status.TiCDC.Volumes)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
- },
- },
- {
- name: "resize Pump PVCs",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- Pump: &v1alpha1.PumpSpec{
- StorageClassName: pointer.StringPtr("sc"),
- Replicas: 1,
- ResourceRequirements: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("2Gi"),
- },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi")},
},
},
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("data-tc-pump-0", label.PumpLabelVal, "sc", "1Gi"),
+ }
},
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("data-tc-pump-0", label.PumpLabelVal, "sc", "2Gi"),
+ expect: func(g *WithT, resizer *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).Should(HaveOccurred())
+ g.Expect(err.Error()).Should(ContainSubstring("set condition before resizing volumes"))
+ g.Expect(ctx.cluster.(*v1alpha1.TidbCluster).IsComponentVolumeResizing(ctx.status.GetMemberType())).Should(BeTrue())
},
- expect: func(g *gomega.WithT, tc *v1alpha1.TidbCluster) {
- expectStatus := map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "data": {
- Name: "data",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 1,
- CurrentCount: 1,
- ResizedCount: 0,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
- },
+ },
+ "need to resize some volumes": {
+ setup: func(ctx *componentVolumeContext) {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.PD
+
+ tc.Status.PD.Conditions = []metav1.Condition{
+ {
+ Type: v1alpha1.ComponentVolumeResizing,
+ Status: metav1.ConditionTrue,
+ Reason: "BeginResizing",
+ Message: "Set resizing condition to begin resizing",
},
}
- diff := cmp.Diff(expectStatus, tc.Status.Pump.Volumes)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
- },
- },
- {
- name: "storage class is missing",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- PD: &v1alpha1.PDSpec{
- Replicas: 1,
- StorageClassName: pointer.StringPtr("sc"),
- ResourceRequirements: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("2Gi"),
- },
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-0"},
+ },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi")},
},
},
- },
- },
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-tc-pd-0", label.PDLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-tc-pd-0", label.PDLabelVal, "sc", "1Gi"),
- },
- wantErr: apierrors.NewNotFound(storagev1.Resource("storageclass"), "sc"),
- },
- {
- name: "storage class does not support volume expansion",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- PD: &v1alpha1.PDSpec{
- Replicas: 1,
- StorageClassName: pointer.StringPtr("sc"),
- ResourceRequirements: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("2Gi"),
- },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-2"},
+ },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-2", scName, "1Gi", "1Gi")},
},
},
- },
- },
- sc: newStorageClass("sc", false),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-0", label.PDLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-0", label.PDLabelVal, "sc", "1Gi"),
- },
- wantErr: nil,
- },
- {
- name: "shrinking is not supported",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- PD: &v1alpha1.PDSpec{
- Replicas: 1,
- StorageClassName: pointer.StringPtr("sc"),
- ResourceRequirements: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("1Gi"),
- },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-3"},
+ },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-3", scName, "1Gi", "1Gi")},
},
},
- },
- },
- sc: newStorageClass("sc", false),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-0", label.PDLabelVal, "sc", "2Gi"),
+ }
},
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-0", label.PDLabelVal, "sc", "2Gi"),
+ expect: func(g *WithT, resizer *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).Should(Succeed())
+ g.Expect(ctx.cluster.(*v1alpha1.TidbCluster).IsComponentVolumeResizing(ctx.status.GetMemberType())).Should(BeTrue())
+
+ cli := resizer.deps.KubeClientset.CoreV1().PersistentVolumeClaims(ctx.cluster.GetNamespace())
+
+ pvc, err := cli.Get(context.TODO(), "volume-1-pvc-1", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(cmp.Diff(pvc, newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"))).To(BeEmpty(), "-want, +got")
+
+ pvc, err = cli.Get(context.TODO(), "volume-1-pvc-2", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(cmp.Diff(pvc, newMockPVC("volume-1-pvc-2", scName, "2Gi", "1Gi"))).To(BeEmpty(), "-want, +got")
+
+ pvc, err = cli.Get(context.TODO(), "volume-1-pvc-3", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(cmp.Diff(pvc, newMockPVC("volume-1-pvc-3", scName, "1Gi", "1Gi"))).To(BeEmpty(), "-want, +got")
},
- wantErr: nil,
},
- {
- name: "resize PVCs pod by pod",
- tc: &v1alpha1.TidbCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "tc",
- },
- Spec: v1alpha1.TidbClusterSpec{
- PD: &v1alpha1.PDSpec{
- ResourceRequirements: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceStorage: resource.MustParse("2Gi"),
- },
+ "some volumes are resizing": {
+ setup: func(ctx *componentVolumeContext) {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.PD
+
+ tc.Status.PD.Conditions = []metav1.Condition{
+ {
+ Type: v1alpha1.ComponentVolumeResizing,
+ Status: metav1.ConditionTrue,
+ Reason: "BeginResizing",
+ Message: "Set resizing condition to begin resizing",
+ },
+ }
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-0"},
},
- StorageClassName: pointer.StringPtr("sc"),
- Replicas: 3,
- StorageVolumes: []v1alpha1.StorageVolume{
- {
- Name: "log",
- StorageSize: "2Gi",
- },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi")},
},
},
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-tc-pd-0", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-tc-pd-1", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-tc-pd-2", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-log-tc-pd-0", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-log-tc-pd-1", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-log-tc-pd-2", label.PDLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newPVCWithStorage("pd-tc-pd-0", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-tc-pd-1", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-tc-pd-2", label.PDLabelVal, "sc", "1Gi"),
- newPVCWithStorage("pd-log-tc-pd-0", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-log-tc-pd-1", label.PDLabelVal, "sc", "2Gi"),
- newPVCWithStorage("pd-log-tc-pd-2", label.PDLabelVal, "sc", "1Gi"),
- },
- expect: func(g *gomega.WithT, tc *v1alpha1.TidbCluster) {
- expectStatus := map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "pd": {
- Name: "pd",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 3,
- CurrentCount: 2,
- ResizedCount: 1,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-2"},
+ },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "1Gi")},
},
},
- "pd-log": {
- Name: "pd-log",
- ObservedStorageVolumeStatus: v1alpha1.ObservedStorageVolumeStatus{
- BoundCount: 3,
- CurrentCount: 1,
- ResizedCount: 2,
- CurrentCapacity: resource.MustParse("1Gi"),
- ResizedCapacity: resource.MustParse("2Gi"),
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster-pd-3"},
+ },
+ volumes: []*volume{
+ {name: "volume-1", pvc: newMockPVC("volume-1-pvc-3", scName, "1Gi", "1Gi")},
},
},
}
- diff := cmp.Diff(expectStatus, tc.Status.PD.Volumes)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
},
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := gomega.NewGomegaWithT(t)
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- fakeDeps := controller.NewFakeDependencies()
- for _, pvc := range tt.pvcs {
- fakeDeps.KubeClientset.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
- }
- if tt.sc != nil {
- fakeDeps.KubeClientset.StorageV1().StorageClasses().Create(context.TODO(), tt.sc, metav1.CreateOptions{})
- }
- for _, pod := range newPodsFromTC(tt.tc) {
- fakeDeps.KubeClientset.CoreV1().Pods(tt.tc.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
- }
-
- resizer := NewPVCResizer(fakeDeps)
-
- informerFactory := fakeDeps.KubeInformerFactory
- informerFactory.Start(ctx.Done())
- informerFactory.WaitForCacheSync(ctx.Done())
+ expect: func(g *WithT, resizer *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).Should(Succeed())
+ g.Expect(ctx.cluster.(*v1alpha1.TidbCluster).IsComponentVolumeResizing(ctx.status.GetMemberType())).Should(BeTrue())
- err := resizer.Sync(tt.tc)
- if tt.wantErr != nil {
- g.Expect(err.Error()).To(gomega.ContainSubstring(tt.wantErr.Error()))
- } else {
- g.Expect(err).To(gomega.Succeed())
- }
+ cli := resizer.deps.KubeClientset.CoreV1().PersistentVolumeClaims(ctx.cluster.GetNamespace())
- for i, pvc := range tt.pvcs {
- wantPVC := tt.wantPVCs[i]
- got, err := fakeDeps.KubeClientset.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
- g.Expect(err).To(gomega.Succeed())
- got.Status.Capacity[v1.ResourceStorage] = got.Spec.Resources.Requests[v1.ResourceStorage] // to ignore resource status
- diff := cmp.Diff(wantPVC, got)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
- }
+ pvc, err := cli.Get(context.TODO(), "volume-1-pvc-1", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(cmp.Diff(pvc, newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"))).To(BeEmpty(), "-want, +got")
- if tt.expect != nil {
- tt.expect(g, tt.tc)
- }
- })
- }
-}
+ pvc, err = cli.Get(context.TODO(), "volume-1-pvc-2", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(cmp.Diff(pvc, newMockPVC("volume-1-pvc-2", scName, "2Gi", "1Gi"))).To(BeEmpty(), "-want, +got")
-func TestDMPVCResizer(t *testing.T) {
- tests := []struct {
- name string
- dc *v1alpha1.DMCluster
- sc *storagev1.StorageClass
- pvcs []*v1.PersistentVolumeClaim
- wantPVCs []*v1.PersistentVolumeClaim
- wantErr error
- }{
- {
- name: "no PVCs",
- dc: &v1alpha1.DMCluster{
- Spec: v1alpha1.DMClusterSpec{},
- },
- },
- {
- name: "resize dm-master PVCs",
- dc: &v1alpha1.DMCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "dc",
- },
- Spec: v1alpha1.DMClusterSpec{
- Master: v1alpha1.MasterSpec{
- StorageSize: "2Gi",
- StorageClassName: pointer.StringPtr("sc"),
- Replicas: 3,
- },
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newDMPVCWithStorage("dm-master-dc-dm-master-0", label.DMMasterLabelVal, "sc", "1Gi"),
- newDMPVCWithStorage("dm-master-dc-dm-master-1", label.DMMasterLabelVal, "sc", "1Gi"),
- newDMPVCWithStorage("dm-master-dc-dm-master-2", label.DMMasterLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newDMPVCWithStorage("dm-master-dc-dm-master-0", label.DMMasterLabelVal, "sc", "2Gi"),
- newDMPVCWithStorage("dm-master-dc-dm-master-1", label.DMMasterLabelVal, "sc", "1Gi"),
- newDMPVCWithStorage("dm-master-dc-dm-master-2", label.DMMasterLabelVal, "sc", "1Gi"),
- },
- },
- {
- name: "resize dm-worker PVCs",
- dc: &v1alpha1.DMCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "dc",
- },
- Spec: v1alpha1.DMClusterSpec{
- Worker: &v1alpha1.WorkerSpec{
- StorageSize: "2Gi",
- StorageClassName: pointer.StringPtr("sc"),
- Replicas: 3,
- },
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newDMPVCWithStorage("dm-worker-dc-dm-worker-0", label.DMWorkerLabelVal, "sc", "1Gi"),
- newDMPVCWithStorage("dm-worker-dc-dm-worker-1", label.DMWorkerLabelVal, "sc", "1Gi"),
- newDMPVCWithStorage("dm-worker-dc-dm-worker-2", label.DMWorkerLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newDMPVCWithStorage("dm-worker-dc-dm-worker-0", label.DMWorkerLabelVal, "sc", "2Gi"),
- newDMPVCWithStorage("dm-worker-dc-dm-worker-1", label.DMWorkerLabelVal, "sc", "1Gi"),
- newDMPVCWithStorage("dm-worker-dc-dm-worker-2", label.DMWorkerLabelVal, "sc", "1Gi"),
- },
- },
- {
- name: "resize PVCs pod by pod",
- dc: &v1alpha1.DMCluster{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: v1.NamespaceDefault,
- Name: "dc",
- },
- Spec: v1alpha1.DMClusterSpec{
- Worker: &v1alpha1.WorkerSpec{
- StorageSize: "2Gi",
- StorageClassName: pointer.StringPtr("sc"),
- Replicas: 3,
- },
- },
- },
- sc: newStorageClass("sc", true),
- pvcs: []*v1.PersistentVolumeClaim{
- newDMPVCWithStorage("dm-worker-dc-dm-worker-0", label.DMWorkerLabelVal, "sc", "2Gi"),
- newDMPVCWithStorage("dm-worker-dc-dm-worker-1", label.DMWorkerLabelVal, "sc", "1Gi"), // should be resized
- newDMPVCWithStorage("dm-worker-dc-dm-worker-2", label.DMWorkerLabelVal, "sc", "1Gi"),
- },
- wantPVCs: []*v1.PersistentVolumeClaim{
- newDMPVCWithStorage("dm-worker-dc-dm-worker-0", label.DMWorkerLabelVal, "sc", "2Gi"),
- newDMPVCWithStorage("dm-worker-dc-dm-worker-1", label.DMWorkerLabelVal, "sc", "2Gi"),
- newDMPVCWithStorage("dm-worker-dc-dm-worker-2", label.DMWorkerLabelVal, "sc", "1Gi"),
+ pvc, err = cli.Get(context.TODO(), "volume-1-pvc-3", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(cmp.Diff(pvc, newMockPVC("volume-1-pvc-3", scName, "1Gi", "1Gi"))).To(BeEmpty(), "-want, +got")
},
},
}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := gomega.NewGomegaWithT(t)
-
+ for name, tt := range testcases {
+ t.Run(name, func(t *testing.T) {
+ g := NewGomegaWithT(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
-
fakeDeps := controller.NewFakeDependencies()
+ informerFactory := fakeDeps.KubeInformerFactory
- for _, pvc := range tt.pvcs {
- fakeDeps.KubeClientset.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
+ resizer := &pvcResizer{
+ deps: fakeDeps,
}
- if tt.sc != nil {
- fakeDeps.KubeClientset.StorageV1().StorageClasses().Create(context.TODO(), tt.sc, metav1.CreateOptions{})
+ tc := &v1alpha1.TidbCluster{
+ ObjectMeta: metav1.ObjectMeta{Namespace: v1.NamespaceDefault, Name: "test-cluster"},
+ Spec: v1alpha1.TidbClusterSpec{
+ PD: &v1alpha1.PDSpec{
+ Replicas: 3,
+ },
+ },
}
- for _, pod := range newPodsFromDC(tt.dc) {
- fakeDeps.KubeClientset.CoreV1().Pods(tt.dc.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
+ vctx := &componentVolumeContext{
+ cluster: tc,
}
+ tt.setup(vctx)
- informerFactory := fakeDeps.KubeInformerFactory
- resizer := NewPVCResizer(fakeDeps)
-
+ for _, podVol := range vctx.actualPodVolumes {
+ if podVol.pod != nil {
+ fakeDeps.KubeClientset.CoreV1().Pods(tc.Namespace).Create(context.TODO(), podVol.pod, metav1.CreateOptions{})
+ }
+ for _, vol := range podVol.volumes {
+ if vol.pvc != nil {
+ fakeDeps.KubeClientset.CoreV1().PersistentVolumeClaims(tc.Namespace).Create(context.TODO(), vol.pvc, metav1.CreateOptions{})
+ }
+ }
+ }
+ fakeDeps.KubeClientset.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(scName, true), metav1.CreateOptions{})
informerFactory.Start(ctx.Done())
informerFactory.WaitForCacheSync(ctx.Done())
- err := resizer.SyncDM(tt.dc)
- if tt.wantErr != nil {
- g.Expect(err.Error()).To(gomega.ContainSubstring(tt.wantErr.Error()))
- } else {
- g.Expect(err).To(gomega.Succeed())
- }
-
- for i, pvc := range tt.pvcs {
- wantPVC := tt.wantPVCs[i]
- got, err := fakeDeps.KubeClientset.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
- g.Expect(err).To(gomega.Succeed())
- got.Status.Capacity[v1.ResourceStorage] = got.Spec.Resources.Requests[v1.ResourceStorage] // to ignore resource status
- diff := cmp.Diff(wantPVC, got)
- g.Expect(diff).To(gomega.BeEmpty(), "unexpected (-want, +got): %s", diff)
- }
+ err := resizer.resizeVolumes(vctx)
+ tt.expect(g, resizer, vctx, err)
})
}
}
@@ -801,21 +385,39 @@ func TestUpdateVolumeStatus(t *testing.T) {
}
ctx.actualPodVolumes = []*podVolumeContext{
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-1", scName, "2Gi", "1Gi"),
- "volume-2": newMockPVC("volume-2-pvc-1", scName, "2Gi", "1Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "1Gi"),
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-1", scName, "2Gi", "1Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-2", scName, "2Gi", "1Gi"),
- "volume-2": newMockPVC("volume-2-pvc-2", scName, "2Gi", "1Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "1Gi"),
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-2", scName, "2Gi", "1Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-3", scName, "2Gi", "1Gi"),
- "volume-2": newMockPVC("volume-2-pvc-3", scName, "2Gi", "1Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-3", scName, "2Gi", "1Gi"),
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-3", scName, "2Gi", "1Gi"),
+ },
},
},
}
@@ -852,21 +454,39 @@ func TestUpdateVolumeStatus(t *testing.T) {
}
ctx.actualPodVolumes = []*podVolumeContext{
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"), // resized
- "volume-2": newMockPVC("volume-2-pvc-1", scName, "2Gi", "1Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"), // resized
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-1", scName, "2Gi", "1Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-2", scName, "2Gi", "1Gi"),
- "volume-2": newMockPVC("volume-2-pvc-2", scName, "2Gi", "2Gi"), // resized
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "1Gi"),
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-2", scName, "2Gi", "2Gi"), // resized
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-3", scName, "2Gi", "1Gi"),
- "volume-2": newMockPVC("volume-2-pvc-3", scName, "2Gi", "2Gi"), // resized
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-3", scName, "2Gi", "1Gi"),
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-3", scName, "2Gi", "2Gi"), // resized
+ },
},
},
}
@@ -903,21 +523,39 @@ func TestUpdateVolumeStatus(t *testing.T) {
}
ctx.actualPodVolumes = []*podVolumeContext{
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"),
- "volume-2": newMockPVC("volume-2-pvc-1", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"),
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-1", scName, "2Gi", "2Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi"),
- "volume-2": newMockPVC("volume-2-pvc-2", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi"),
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-2", scName, "2Gi", "2Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi"),
- "volume-2": newMockPVC("volume-2-pvc-3", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi"),
+ },
+ {
+ name: "volume-2",
+ pvc: newMockPVC("volume-2-pvc-3", scName, "2Gi", "2Gi"),
+ },
},
},
}
@@ -948,27 +586,38 @@ func TestUpdateVolumeStatus(t *testing.T) {
{
name: "remove lost volume status",
setup: func(ctx *componentVolumeContext) {
- ctx.sourceVolumeStatus = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "volume-1": {Name: "volume-1"},
- "volume-2": {Name: "volume-2"},
+ ctx.status = &v1alpha1.PDStatus{
+ Volumes: map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
+ "volume-1": {Name: "volume-1"},
+ "volume-2": {Name: "volume-2"},
+ },
}
ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
"volume-1": resource.MustParse("2Gi"),
}
ctx.actualPodVolumes = []*podVolumeContext{
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi"),
+ },
},
},
}
@@ -989,26 +638,37 @@ func TestUpdateVolumeStatus(t *testing.T) {
{
name: "update existing volume status",
setup: func(ctx *componentVolumeContext) {
- ctx.sourceVolumeStatus = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
- "volume-1": {Name: "volume-1"},
+ ctx.status = &v1alpha1.PDStatus{
+ Volumes: map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{
+ "volume-1": {Name: "volume-1"},
+ },
}
ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
"volume-1": resource.MustParse("2Gi"),
}
ctx.actualPodVolumes = []*podVolumeContext{
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-2", scName, "2Gi", "2Gi"),
+ },
},
},
{
- volToPVCs: map[v1alpha1.StorageVolumeName]*v1.PersistentVolumeClaim{
- "volume-1": newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi"),
+ volumes: []*volume{
+ {
+ name: "volume-1",
+ pvc: newMockPVC("volume-1-pvc-3", scName, "2Gi", "2Gi"),
+ },
},
},
}
@@ -1030,140 +690,621 @@ func TestUpdateVolumeStatus(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- g := gomega.NewGomegaWithT(t)
+ g := NewGomegaWithT(t)
resizer := &pvcResizer{
deps: controller.NewFakeDependencies(),
}
ctx := &componentVolumeContext{}
- ctx.sourceVolumeStatus = map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{}
+ ctx.status = &v1alpha1.PDStatus{
+ Volumes: map[v1alpha1.StorageVolumeName]*v1alpha1.StorageVolumeStatus{},
+ }
if tt.setup != nil {
tt.setup(ctx)
}
resizer.updateVolumeStatus(ctx)
- diff := cmp.Diff(ctx.sourceVolumeStatus, tt.expect)
- g.Expect(diff).Should(gomega.BeEmpty(), "unexpected (-want, +got)")
+ diff := cmp.Diff(ctx.status.GetVolumes(), tt.expect)
+ g.Expect(diff).Should(BeEmpty(), "unexpected (-want, +got)")
})
}
}
-func newPodsFromTC(tc *v1alpha1.TidbCluster) []*corev1.Pod {
- pods := []*corev1.Pod{}
-
- addPods := func(replica int32, comp v1alpha1.MemberType,
- dataVolumeStorageClass *string, storageVolumes []v1alpha1.StorageVolume, storageClaims []v1alpha1.StorageClaim) {
- for i := int32(0); i < replica; i++ {
- pods = append(pods, &corev1.Pod{
- TypeMeta: metav1.TypeMeta{
- Kind: "Pod",
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: func() string {
- name, _ := MemberPodName(tc.Name, v1alpha1.TiDBClusterKind, i, comp)
- return name
- }(),
- Namespace: tc.GetNamespace(),
- Labels: map[string]string{
- label.NameLabelKey: "tidb-cluster",
- label.ComponentLabelKey: comp.String(),
- label.ManagedByLabelKey: label.TiDBOperator,
- label.InstanceLabelKey: tc.GetInstanceName(),
- },
- },
- Spec: corev1.PodSpec{
- Volumes: []corev1.Volume{},
- },
- })
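+// TestClassifyVolumes verifies that volumes are bucketed into the needResize,
+// resizing, and resized phases, and that unsupported volumes are skipped.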
+func TestClassifyVolumes(t *testing.T) {
+ scName := "sc-1"
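+	// diffVolumes asserts that the actual classification matches the expected volumes for every phase.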
+ diffVolumes := func(g *GomegaWithT, vols1 map[volumePhase][]*volume, vols2 map[volumePhase][]*volume) {
+ phases := []volumePhase{needResize, resizing, resized}
+ for _, phase := range phases {
+ g.Expect(len(vols1[phase])).Should(Equal(len(vols2[phase])))
+ for i := range vols1[phase] {
+ g.Expect(diffVolume(vols1[phase][i], vols2[phase][i])).Should(BeEmpty(), "unexpected (-want, +got)")
+ }
}
- addVolumeForPod(pods, comp, dataVolumeStorageClass, storageVolumes, storageClaims)
}
- if tc.Spec.PD != nil {
- addPods(tc.Spec.PD.Replicas, v1alpha1.PDMemberType, tc.Spec.PD.StorageClassName, tc.Spec.PD.StorageVolumes, nil)
- }
- if tc.Spec.TiDB != nil {
- addPods(tc.Spec.TiDB.Replicas, v1alpha1.TiDBMemberType, nil, tc.Spec.TiDB.StorageVolumes, nil)
- }
- if tc.Spec.TiKV != nil {
- addPods(tc.Spec.TiKV.Replicas, v1alpha1.TiKVMemberType, tc.Spec.TiKV.StorageClassName, tc.Spec.TiKV.StorageVolumes, nil)
- }
- if tc.Spec.TiFlash != nil {
- addPods(tc.Spec.TiFlash.Replicas, v1alpha1.TiFlashMemberType, nil, nil, tc.Spec.TiFlash.StorageClaims)
- }
- if tc.Spec.TiCDC != nil {
- addPods(tc.Spec.TiCDC.Replicas, v1alpha1.TiCDCMemberType, nil, tc.Spec.TiCDC.StorageVolumes, nil)
- }
- if tc.Spec.Pump != nil {
- addPods(tc.Spec.Pump.Replicas, v1alpha1.PumpMemberType, tc.Spec.Pump.StorageClassName, nil, nil)
+ tests := map[string]struct {
+ setup func(*componentVolumeContext) []*volume
+ sc *storagev1.StorageClass
+
+ expect func(*GomegaWithT, map[volumePhase][]*volume, error)
+ }{
+ "normal": {
+ setup: func(ctx *componentVolumeContext) []*volume {
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ "volume-2": resource.MustParse("2Gi"),
+ "volume-3": resource.MustParse("2Gi"),
+ }
+ volumes := []*volume{
+ newVolume("volume-1", newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi")), // resized
+ newVolume("volume-2", newMockPVC("volume-2-pvc-1", scName, "2Gi", "1Gi")), // resizing
+ newVolume("volume-3", newMockPVC("volume-3-pvc-1", scName, "1Gi", "1Gi")), // need resize
+ }
+
+ return volumes
+ },
+ sc: newStorageClass(scName, true),
+ expect: func(g *GomegaWithT, volumes map[volumePhase][]*volume, err error) {
+ expectVolumes := map[volumePhase][]*volume{
+ needResize: {
+ newVolume("volume-3", newMockPVC("volume-3-pvc-1", scName, "1Gi", "1Gi")), // need resize
+ },
+ resized: {
+ newVolume("volume-1", newMockPVC("volume-1-pvc-1", scName, "2Gi", "2Gi")), // resized
+ },
+ resizing: {
+ newVolume("volume-2", newMockPVC("volume-2-pvc-1", scName, "2Gi", "1Gi")), // resizing
+ },
+ }
+
+ g.Expect(err).To(Succeed())
+ diffVolumes(g, volumes, expectVolumes)
+ },
+ },
+		"shrink storage": {
+ setup: func(ctx *componentVolumeContext) []*volume {
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ }
+ volumes := []*volume{
+					newVolume("volume-1", newMockPVC("volume-1-pvc-1", scName, "3Gi", "3Gi")), // needs shrink, which is not supported
+ }
+ return volumes
+ },
+ sc: newStorageClass(scName, true),
+ expect: func(g *GomegaWithT, volumes map[volumePhase][]*volume, err error) {
+ expectVolumes := map[volumePhase][]*volume{
+ needResize: {},
+ resized: {},
+ resizing: {},
+ }
+
+ g.Expect(err).To(Succeed())
+ diffVolumes(g, volumes, expectVolumes)
+ },
+ },
+ "default storage class": {
+ setup: func(ctx *componentVolumeContext) []*volume {
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ }
+ vol := newVolume("volume-1", newMockPVC("volume-1-pvc-1", scName, "1Gi", "1Gi"))
+ vol.pvc.Spec.StorageClassName = nil
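+				// the PVC has no explicit storage class, so the resizer skips it (all phases stay empty)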
+ volumes := []*volume{
+ vol,
+ }
+
+ return volumes
+ },
+ sc: newStorageClass(scName, true),
+ expect: func(g *GomegaWithT, volumes map[volumePhase][]*volume, err error) {
+ expectVolumes := map[volumePhase][]*volume{
+ needResize: {},
+ resized: {},
+ resizing: {},
+ }
+
+ g.Expect(err).To(Succeed())
+ diffVolumes(g, volumes, expectVolumes)
+ },
+ },
+ "unsupported storage class": {
+ setup: func(ctx *componentVolumeContext) []*volume {
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{
+ "volume-1": resource.MustParse("2Gi"),
+ }
+ volumes := []*volume{
+ newVolume("volume-1", newMockPVC("volume-1-pvc-1", scName, "1Gi", "1Gi")),
+ }
+
+ return volumes
+ },
+ sc: newStorageClass(scName, false),
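+			// the storage class does not allow volume expansion, so nothing is classified for resizing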
+ expect: func(g *GomegaWithT, volumes map[volumePhase][]*volume, err error) {
+ expectVolumes := map[volumePhase][]*volume{
+ needResize: {},
+ resized: {},
+ resizing: {},
+ }
+
+ g.Expect(err).To(Succeed())
+ diffVolumes(g, volumes, expectVolumes)
+ },
+ },
+ "not exist in desired volumes": {
+ setup: func(ctx *componentVolumeContext) []*volume {
+ ctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{}
+ volumes := []*volume{
+ newVolume("volume-1", newMockPVC("volume-1-pvc-1", scName, "1Gi", "1Gi")),
+ }
+
+ return volumes
+ },
+ sc: newStorageClass(scName, false),
+ expect: func(g *GomegaWithT, volumes map[volumePhase][]*volume, err error) {
+ expectVolumes := map[volumePhase][]*volume{
+ needResize: {},
+ resized: {},
+ resizing: {},
+ }
+
+ g.Expect(err).To(Succeed())
+ diffVolumes(g, volumes, expectVolumes)
+ },
+ },
}
- return pods
+ for name, tt := range tests {
+ t.Run(name, func(t *testing.T) {
+ g := NewGomegaWithT(t)
+ resizer := &pvcResizer{
+ deps: controller.NewFakeDependencies(),
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ if tt.sc != nil {
+ resizer.deps.KubeClientset.StorageV1().StorageClasses().Create(context.TODO(), tt.sc, metav1.CreateOptions{})
+ }
+ informerFactory := resizer.deps.KubeInformerFactory
+ informerFactory.Start(ctx.Done())
+ informerFactory.WaitForCacheSync(ctx.Done())
+
+ vctx := &componentVolumeContext{}
+ vctx.cluster = &v1alpha1.TidbCluster{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster"},
+ }
+ vctx.status = &v1alpha1.PDStatus{}
+ vctx.desiredVolumeQuantity = map[v1alpha1.StorageVolumeName]resource.Quantity{}
+ volumes := tt.setup(vctx)
+ classifiedVolumes, err := resizer.classifyVolumes(vctx, volumes)
+ tt.expect(g, classifiedVolumes, err)
+ })
+ }
}
-func newPodsFromDC(dc *v1alpha1.DMCluster) []*corev1.Pod {
- pods := []*corev1.Pod{}
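+// TestResizeHook covers the resize hooks: beginResize, endResize, and the
+// per-pod beforeResizeForPod checks (leader eviction for TiKV).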
+func TestResizeHook(t *testing.T) {
+ t.Run("beginResize", func(t *testing.T) {
+ g := NewGomegaWithT(t)
+ resizer := &pvcResizer{}
+ tc := &v1alpha1.TidbCluster{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster"},
+ Spec: v1alpha1.TidbClusterSpec{
+ TiKV: &v1alpha1.TiKVSpec{},
+ },
+ }
+ ctx := &componentVolumeContext{
+ cluster: tc,
+ status: &tc.Status.TiKV,
+ }
+
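+		// beginResize sets the resizing condition and returns a requeue error so the
+		// condition is persisted before any volume is actually resized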
+ err := resizer.beginResize(ctx)
+ g.Expect(err).To(MatchError(controller.RequeueErrorf("set condition before resizing volumes for test-ns/test-cluster:tikv")))
+ g.Expect(len(tc.Status.TiKV.Conditions)).To(Equal(1))
+ g.Expect(tc.Status.TiKV.Conditions[0].Reason).To(Equal("BeginResizing"))
+ g.Expect(tc.Status.TiKV.Conditions[0].Status).To(Equal(metav1.ConditionTrue))
+ g.Expect(tc.IsComponentVolumeResizing(v1alpha1.TiKVMemberType)).To(BeTrue())
+ })
- addPods := func(replica int32, comp v1alpha1.MemberType,
- dataVolumeStorageClass *string, storageVolumes []v1alpha1.StorageVolume) {
- for i := int32(0); i < replica; i++ {
- pods = append(pods, &corev1.Pod{
- TypeMeta: metav1.TypeMeta{
- Kind: "Pod",
- APIVersion: "v1",
+ t.Run("endResize", func(t *testing.T) {
+ testcases := map[string]struct {
+ setup func(ctx *componentVolumeContext)
+ expect func(g *GomegaWithT, p *pvcResizer, ctx *componentVolumeContext, err error)
+ }{
+ "set condition": {
+ setup: func(ctx *componentVolumeContext) {
+ ctx.status = &v1alpha1.PDStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: v1alpha1.ComponentVolumeResizing,
+ Status: metav1.ConditionTrue,
+ Reason: "BeginResizing",
+ Message: "Set resizing condition to begin resizing",
+ },
+ },
+ }
},
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-%s-%d", dc.Name, comp.String(), i),
- Namespace: dc.GetNamespace(),
- Labels: map[string]string{
- label.NameLabelKey: "dm-cluster",
- label.ComponentLabelKey: comp.String(),
- label.ManagedByLabelKey: label.TiDBOperator,
- label.InstanceLabelKey: dc.GetInstanceName(),
- },
+ expect: func(g *GomegaWithT, p *pvcResizer, ctx *componentVolumeContext, err error) {
+ conds := ctx.status.GetConditions()
+ g.Expect(len(conds)).To(Equal(1))
+ g.Expect(conds[0].Reason).To(Equal("EndResizing"))
+ g.Expect(conds[0].Status).To(Equal(metav1.ConditionFalse))
+ g.Expect(ctx.cluster.(*v1alpha1.TidbCluster).IsComponentVolumeResizing(v1alpha1.TiKVMemberType)).To(BeFalse())
},
- Spec: corev1.PodSpec{
- Volumes: []corev1.Volume{},
+ },
+ "remove eviction annotation for tikv": {
+ setup: func(ctx *componentVolumeContext) {
+ ctx.status = &v1alpha1.TiKVStatus{}
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-0",
+ Annotations: map[string]string{v1alpha1.EvictLeaderAnnKeyForResize: "1"},
+ },
+ },
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-1",
+ }},
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-2",
+ Annotations: map[string]string{v1alpha1.EvictLeaderAnnKeyForResize: "123"},
+ },
+ },
+ },
+ }
},
- })
+ expect: func(g *GomegaWithT, p *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).To(Succeed())
+ cli := p.deps.KubeClientset.CoreV1().Pods(ctx.cluster.GetNamespace())
+ pod, err := cli.Get(context.TODO(), "test-cluster-tikv-0", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(pod.Annotations).NotTo(HaveKey(v1alpha1.EvictLeaderAnnKeyForResize))
+ pod, err = cli.Get(context.TODO(), "test-cluster-tikv-1", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(pod.Annotations).NotTo(HaveKey(v1alpha1.EvictLeaderAnnKeyForResize))
+ pod, err = cli.Get(context.TODO(), "test-cluster-tikv-2", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(pod.Annotations).NotTo(HaveKey(v1alpha1.EvictLeaderAnnKeyForResize))
+ },
+ },
}
- addVolumeForPod(pods, comp, dataVolumeStorageClass, storageVolumes, nil)
- }
- addPods(dc.Spec.Master.Replicas, v1alpha1.DMMasterMemberType, dc.Spec.Master.StorageClassName, nil)
- if dc.Spec.Worker != nil {
- addPods(dc.Spec.Worker.Replicas, v1alpha1.DMWorkerMemberType, dc.Spec.Worker.StorageClassName, nil)
- }
+ for name, tt := range testcases {
+ t.Run(name, func(t *testing.T) {
+ g := NewGomegaWithT(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ fakeDeps := controller.NewFakeDependencies()
+ informerFactory := fakeDeps.KubeInformerFactory
- return pods
-}
+ resizer := &pvcResizer{
+ deps: fakeDeps,
+ }
+ tc := &v1alpha1.TidbCluster{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster"},
+ }
+ vctx := &componentVolumeContext{
+ cluster: tc,
+ }
+ tt.setup(vctx)
-func addVolumeForPod(pods []*corev1.Pod, comp v1alpha1.MemberType,
- dataVolumeStorageClass *string, storageVolumes []v1alpha1.StorageVolume, storageClaims []v1alpha1.StorageClaim) {
- volumeNames := []string{}
+ for _, podVol := range vctx.actualPodVolumes {
+ if podVol.pod != nil {
+ fakeDeps.KubeClientset.CoreV1().Pods(tc.Namespace).Create(context.TODO(), podVol.pod, metav1.CreateOptions{})
+ }
+ }
+ informerFactory.Start(ctx.Done())
+ informerFactory.WaitForCacheSync(ctx.Done())
- if dataVolumeStorageClass != nil {
- volumeNames = append(volumeNames, string(v1alpha1.GetStorageVolumeName("", comp)))
- }
- for _, sv := range storageVolumes {
- volumeNames = append(volumeNames, string(v1alpha1.GetStorageVolumeName(sv.Name, comp)))
- }
- for i := range storageClaims {
- volumeNames = append(volumeNames, string(v1alpha1.GetStorageVolumeNameForTiFlash(i)))
- }
+ err := resizer.endResize(vctx)
+ tt.expect(g, resizer, vctx, err)
+ })
+ }
+ })
- for _, volName := range volumeNames {
- for _, pod := range pods {
- volume := corev1.Volume{}
- volume.Name = volName
- volume.PersistentVolumeClaim = &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: fmt.Sprintf("%s-%s", volume.Name, pod.Name),
- }
- pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
+ t.Run("beforeResizeForPod", func(t *testing.T) {
+ testcases := map[string]struct {
+ setup func(ctx *componentVolumeContext) *corev1.Pod
+ expect func(g *GomegaWithT, p *pvcResizer, ctx *componentVolumeContext, err error)
+ }{
+ "succeed": {
+ setup: func(ctx *componentVolumeContext) *corev1.Pod {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.TiKV
+
+ tc.Status.TiKV = v1alpha1.TiKVStatus{
+ Stores: map[string]v1alpha1.TiKVStore{
+ "0": {
+ ID: "0",
+ PodName: "test-cluster-tikv-0",
+ State: v1alpha1.TiKVStateUp,
+ LeaderCount: 10,
+ },
+ "1": {
+ ID: "1",
+ PodName: "test-cluster-tikv-1",
+ State: v1alpha1.TiKVStateUp,
+ LeaderCount: 0,
+ },
+ "2": {
+ ID: "2",
+ PodName: "test-cluster-tikv-2",
+ State: v1alpha1.TiKVStateUp,
+ LeaderCount: 10,
+ },
+ },
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-0",
+ },
+ },
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-1",
+ Annotations: map[string]string{v1alpha1.EvictLeaderAnnKeyForResize: "none"},
+ }},
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-2",
+ },
+ },
+ },
+ }
+
+ return ctx.actualPodVolumes[1].pod
+ },
+ expect: func(g *GomegaWithT, p *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).To(Succeed())
+ },
+ },
+		"error when any store is not Up": {
+ setup: func(ctx *componentVolumeContext) *corev1.Pod {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.TiKV
+
+ tc.Status.TiKV = v1alpha1.TiKVStatus{
+ Stores: map[string]v1alpha1.TiKVStore{
+ "0": {
+ ID: "0",
+ PodName: "test-cluster-tikv-0",
+ State: v1alpha1.TiKVStateUp,
+ },
+ "1": {
+ ID: "1",
+ PodName: "test-cluster-tikv-1",
+ State: v1alpha1.TiKVStateDown,
+ },
+ "2": {
+ ID: "2",
+ PodName: "test-cluster-tikv-2",
+ State: v1alpha1.TiKVStateUp,
+ },
+ },
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-0",
+ },
+ },
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-1",
+ }},
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-2",
+ },
+ },
+ },
+ }
+
+ return ctx.actualPodVolumes[2].pod
+ },
+ expect: func(g *GomegaWithT, p *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(err.Error()).To(ContainSubstring("store 1 of pod test-cluster-tikv-1 is not ready"))
+ },
+ },
+ "sync leader eviction annotation": {
+ setup: func(ctx *componentVolumeContext) *corev1.Pod {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.TiKV
+
+ tc.Status.TiKV = v1alpha1.TiKVStatus{
+ Stores: map[string]v1alpha1.TiKVStore{
+ "0": {
+ ID: "0",
+ PodName: "test-cluster-tikv-0",
+ State: v1alpha1.TiKVStateUp,
+ },
+ "1": {
+ ID: "1",
+ PodName: "test-cluster-tikv-1",
+ State: v1alpha1.TiKVStateUp,
+ },
+ "2": {
+ ID: "2",
+ PodName: "test-cluster-tikv-2",
+ State: v1alpha1.TiKVStateUp,
+ },
+ },
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-0",
+ Annotations: map[string]string{v1alpha1.EvictLeaderAnnKeyForResize: "none"},
+ },
+ },
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-1",
+ }},
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-2",
+ Annotations: map[string]string{v1alpha1.EvictLeaderAnnKeyForResize: "none"},
+ },
+ },
+ },
+ }
+
+ return ctx.actualPodVolumes[1].pod
+ },
+ expect: func(g *GomegaWithT, p *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).To(Succeed())
+
+ cli := p.deps.KubeClientset.CoreV1().Pods(ctx.cluster.GetNamespace())
+ pod, err := cli.Get(context.TODO(), "test-cluster-tikv-0", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(pod.Annotations).NotTo(HaveKey(v1alpha1.EvictLeaderAnnKeyForResize))
+ pod, err = cli.Get(context.TODO(), "test-cluster-tikv-1", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(pod.Annotations).To(HaveKey(v1alpha1.EvictLeaderAnnKeyForResize))
+ pod, err = cli.Get(context.TODO(), "test-cluster-tikv-2", metav1.GetOptions{})
+ g.Expect(err).To(Succeed())
+ g.Expect(pod.Annotations).To(HaveKey(v1alpha1.EvictLeaderAnnKeyForResize))
+ },
+ },
+		"wait for leader count to be 0": {
+ setup: func(ctx *componentVolumeContext) *corev1.Pod {
+ tc := ctx.cluster.(*v1alpha1.TidbCluster)
+ ctx.status = &tc.Status.TiKV
+
+ tc.Status.TiKV = v1alpha1.TiKVStatus{
+ Stores: map[string]v1alpha1.TiKVStore{
+ "0": {
+ ID: "0",
+ PodName: "test-cluster-tikv-0",
+ State: v1alpha1.TiKVStateUp,
+ LeaderCount: 10,
+ },
+ "1": {
+ ID: "1",
+ PodName: "test-cluster-tikv-1",
+ State: v1alpha1.TiKVStateUp,
+ LeaderCount: 10,
+ },
+ "2": {
+ ID: "2",
+ PodName: "test-cluster-tikv-2",
+ State: v1alpha1.TiKVStateUp,
+ LeaderCount: 10,
+ },
+ },
+ }
+ ctx.actualPodVolumes = []*podVolumeContext{
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-0",
+ },
+ },
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-1",
+ Annotations: map[string]string{v1alpha1.EvictLeaderAnnKeyForResize: "none"},
+ }},
+ },
+ {
+ pod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns", Name: "test-cluster-tikv-2",
+ },
+ },
+ },
+ }
+
+ return ctx.actualPodVolumes[1].pod
+ },
+ expect: func(g *GomegaWithT, p *pvcResizer, ctx *componentVolumeContext, err error) {
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(err.Error()).To(ContainSubstring("wait for leader count of store 1 to be 0"))
+ },
+ },
}
+ for name, tt := range testcases {
+ t.Run(name, func(t *testing.T) {
+ g := NewGomegaWithT(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ fakeDeps := controller.NewFakeDependencies()
+ informerFactory := fakeDeps.KubeInformerFactory
+ informerFactory.Start(ctx.Done())
+ informerFactory.WaitForCacheSync(ctx.Done())
+
+ resizer := &pvcResizer{
+ deps: fakeDeps,
+ }
+ tc := &v1alpha1.TidbCluster{
+ ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Name: "test-cluster"},
+ Spec: v1alpha1.TidbClusterSpec{
+ TiKV: &v1alpha1.TiKVSpec{
+ Replicas: 3,
+ },
+ },
+ }
+ vctx := &componentVolumeContext{
+ cluster: tc,
+ }
+ resizePod := tt.setup(vctx)
+ volumes := []*volume{}
+ for _, podVol := range vctx.actualPodVolumes {
+ if podVol.pod != nil {
+ fakeDeps.KubeClientset.CoreV1().Pods(tc.Namespace).Create(context.TODO(), podVol.pod, metav1.CreateOptions{})
+ }
+ if podVol.pod == resizePod {
+ volumes = podVol.volumes
+ }
+ }
+
+ err := resizer.beforeResizeForPod(vctx, resizePod, volumes)
+ tt.expect(g, resizer, vctx, err)
+ })
+ }
+ })
+}
+
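+// newVolume is a test helper that builds a volume from a storage volume name and its PVC.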
+func newVolume(name v1alpha1.StorageVolumeName, pvc *corev1.PersistentVolumeClaim) *volume {
+ return &volume{name: name, pvc: pvc}
+}
+
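+// diffVolume returns a non-empty diff when two volumes differ in name or PVC.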
+func diffVolume(v1 *volume, v2 *volume) string {
+ if diff := cmp.Diff(v1.name, v2.name); diff != "" {
+ return diff
+ }
+ if diff := cmp.Diff(v1.pvc, v2.pvc); diff != "" {
+ return diff
}
+ return ""
}
diff --git a/pkg/manager/member/tikv_upgrader.go b/pkg/manager/member/tikv_upgrader.go
index 023c8deeffd..54b3507c719 100644
--- a/pkg/manager/member/tikv_upgrader.go
+++ b/pkg/manager/member/tikv_upgrader.go
@@ -58,13 +58,8 @@ func (u *tikvUpgrader) Upgrade(meta metav1.Object, oldSet *apps.StatefulSet, new
var status *v1alpha1.TiKVStatus
switch meta := meta.(type) {
case *v1alpha1.TidbCluster:
- if meta.Status.TiFlash.Phase == v1alpha1.UpgradePhase ||
- meta.Status.PD.Phase == v1alpha1.UpgradePhase ||
- meta.TiKVScaling() {
- klog.Infof("TidbCluster: [%s/%s]'s tiflash status is %v, pd status is %v, "+
- "tikv status is %v, can not upgrade tikv",
- ns, tcName,
- meta.Status.TiFlash.Phase, meta.Status.PD.Phase, meta.Status.TiKV.Phase)
+ if ready, reason := isTiKVReadyToUpgrade(meta); !ready {
+			klog.Infof("TidbCluster: [%s/%s], cannot upgrade tikv because: %s", ns, tcName, reason)
_, podSpec, err := GetLastAppliedConfig(oldSet)
if err != nil {
return err
@@ -325,6 +320,23 @@ func getStoreByOrdinal(name string, status v1alpha1.TiKVStatus, ordinal int32) *
return nil
}
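+// isTiKVReadyToUpgrade reports whether TiKV can be upgraded now; when it
+// cannot, the returned string explains why, for logging.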
+func isTiKVReadyToUpgrade(tc *v1alpha1.TidbCluster) (bool, string) {
+ if tc.Status.TiFlash.Phase == v1alpha1.UpgradePhase {
+ return false, fmt.Sprintf("tiflash status is %s", tc.Status.TiFlash.Phase)
+ }
+ if tc.Status.PD.Phase == v1alpha1.UpgradePhase {
+ return false, fmt.Sprintf("pd status is %s", tc.Status.PD.Phase)
+ }
+ if tc.TiKVScaling() {
+ return false, fmt.Sprintf("tikv status is %s", tc.Status.TiKV.Phase)
+ }
+ if tc.IsComponentVolumeResizing(v1alpha1.TiKVMemberType) {
+ return false, "tikv is resizing volumes"
+ }
+
+ return true, ""
+}
+
type fakeTiKVUpgrader struct{}
// NewFakeTiKVUpgrader returns a fake tikv upgrader