From 0b7cdaf15ca6fcf7788d35c9882f0ef9b881de86 Mon Sep 17 00:00:00 2001
From: drivebyer
Date: Wed, 5 Jun 2024 10:33:13 +0800
Subject: [PATCH] feat: rolling update sequence from leader to follower

Signed-off-by: drivebyer
---
 controllers/rediscluster_controller.go | 11 ++-
 k8sutils/statefulset.go                | 54 ++++++++++
 main.go                                | 12 ++-
 .../redis-cluster/chainsaw-test.yaml   | 98 +++++++++++++++++++
 .../redis-cluster/cluster-hscale.yaml  | 36 +++++++
 .../redis-cluster/cluster.yaml         | 36 +++++++
 .../redis-cluster/ready-cluster.yaml   | 10 ++
 7 files changed, 248 insertions(+), 9 deletions(-)
 create mode 100644 tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/chainsaw-test.yaml
 create mode 100644 tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/cluster-hscale.yaml
 create mode 100644 tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/cluster.yaml
 create mode 100644 tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/ready-cluster.yaml

diff --git a/controllers/rediscluster_controller.go b/controllers/rediscluster_controller.go
index fc280c916..f385b978a 100644
--- a/controllers/rediscluster_controller.go
+++ b/controllers/rediscluster_controller.go
@@ -36,6 +36,7 @@ import (
 // RedisClusterReconciler reconciles a RedisCluster object
 type RedisClusterReconciler struct {
 	client.Client
+	k8sutils.StatefulSet
 	K8sClient  kubernetes.Interface
 	Dk8sClient dynamic.Interface
 	Log        logr.Logger
@@ -125,6 +126,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 		return ctrl.Result{}, err
 	}
 
+	// TODO: remove this once the controller watches StatefulSets
 	redisLeaderInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName()+"-leader")
 	if err != nil {
 		if errors.IsNotFound(err) {
@@ -133,7 +135,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 		return ctrl.Result{}, err
 	}
 
-	if redisLeaderInfo.Status.ReadyReplicas == leaderReplicas {
+	if r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-leader") {
 		// Mark the cluster status as initializing if there are no follower nodes
 		if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) ||
 			instance.Status.ReadyFollowerReplicas != followerReplicas {
@@ -158,6 +160,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 			return ctrl.Result{}, err
 		}
 	}
+	// TODO: remove this once the controller watches StatefulSets
 	redisFollowerInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName()+"-follower")
 	if err != nil {
 		if errors.IsNotFound(err) {
@@ -171,9 +174,9 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 		return ctrl.Result{RequeueAfter: time.Second * 60}, nil
 	}
 
-	if !(redisLeaderInfo.Status.ReadyReplicas == leaderReplicas && redisFollowerInfo.Status.ReadyReplicas == followerReplicas) {
-		reqLogger.Info("Redis leader and follower nodes are not ready yet", "Ready.Replicas", strconv.Itoa(int(redisLeaderInfo.Status.ReadyReplicas)), "Expected.Replicas", leaderReplicas)
-		return ctrl.Result{RequeueAfter: time.Second * 60}, nil
+	if !(r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-leader") && r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-follower")) {
+		reqLogger.Info("Redis leader and follower nodes are not ready yet")
+		return ctrl.Result{RequeueAfter: time.Second * 30}, nil
 	}
 
 	// Mark the cluster status as bootstrapping if all the leader and follower nodes are ready
diff --git a/k8sutils/statefulset.go b/k8sutils/statefulset.go
index c654b8d52..2a0a5c675 100644
--- a/k8sutils/statefulset.go
+++ b/k8sutils/statefulset.go
@@ -24,6 +24,60 @@ import (
 	"k8s.io/utils/ptr"
 )
 
+type StatefulSet interface {
+	IsStatefulSetReady(ctx context.Context, namespace, name string) bool
+}
+
+type StatefulSetService struct {
+	kubeClient kubernetes.Interface
+	log        logr.Logger
+}
+
+func NewStatefulSetService(kubeClient kubernetes.Interface, log logr.Logger) *StatefulSetService {
+	log = log.WithValues("service", "k8s.statefulset")
+	return &StatefulSetService{
+		kubeClient: kubeClient,
+		log:        log,
+	}
+}
+
+func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace, name string) bool {
+	var (
+		partition = 0
+		replicas  = 1
+
+		logger = s.log.WithValues("namespace", namespace, "name", name)
+	)
+
+	sts, err := s.kubeClient.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
+	if err != nil {
+		logger.Error(err, "failed to get statefulset")
+		return false
+	}
+
+	if sts.Spec.UpdateStrategy.RollingUpdate != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
+		partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition)
+	}
+	if sts.Spec.Replicas != nil {
+		replicas = int(*sts.Spec.Replicas)
+	}
+
+	if expectedUpdateReplicas := replicas - partition; sts.Status.UpdatedReplicas < int32(expectedUpdateReplicas) {
+		logger.V(1).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas)
+		return false
+	}
+	if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
+		logger.V(1).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision)
+		return false
+	}
+	if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
+		logger.V(1).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation)
+		return false
+	}
+
+	return true
+}
+
 const (
 	redisExporterContainer = "redis-exporter"
 )
diff --git a/main.go b/main.go
index a45dfd53a..8c29c42e9 100644
--- a/main.go
+++ b/main.go
@@ -124,12 +124,14 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "Redis")
 		os.Exit(1)
 	}
+	rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster")
 	if err = (&controllers.RedisClusterReconciler{
-		Client:     mgr.GetClient(),
-		K8sClient:  k8sclient,
-		Dk8sClient: dk8sClient,
-		Log:        ctrl.Log.WithName("controllers").WithName("RedisCluster"),
-		Scheme:     mgr.GetScheme(),
+		Client:      mgr.GetClient(),
+		K8sClient:   k8sclient,
+		Dk8sClient:  dk8sClient,
+		Log:         rcLog,
+		Scheme:      mgr.GetScheme(),
+		StatefulSet: k8sutils.NewStatefulSetService(k8sclient, rcLog),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "RedisCluster")
 		os.Exit(1)
diff --git a/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/chainsaw-test.yaml
new file mode 100644
index 000000000..902751e45
--- /dev/null
+++ b/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/chainsaw-test.yaml
@@ -0,0 +1,98 @@
+---
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: redis-cluster
+spec:
+  steps:
+    - try:
+        - apply:
+            file: cluster.yaml
+        - assert:
+            file: ready-cluster.yaml
+
+    - name: Try saving a key
+      try:
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli -c -p 6379 set foo-0 bar-0
+            check:
+              ($stdout=='OK'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli -c -p 6379 set foo-1 bar-1
+            check:
+              ($stdout=='OK'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli -c -p 6379 set foo-2 bar-2
+            check:
+              ($stdout=='OK'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli -c -p 6379 set foo-3 bar-3
+            check:
+              ($stdout=='OK'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli -c -p 6379 set foo-4 bar-4
+            check:
+              ($stdout=='OK'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli -c -p 6379 set foo-5 bar-5
+            check:
+              ($stdout=='OK'): true
+
+    - name: Trigger a rolling update of the cluster
+      try:
+        - apply:
+            file: cluster-hscale.yaml
+        - assert:
+            file: ready-cluster.yaml
+
+    - name: Check if all keys exist
+      try:
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli -c -p 6379 get foo-0
+            check:
+              ($stdout=='bar-0'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli -c -p 6379 get foo-1
+            check:
+              ($stdout=='bar-1'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli -c -p 6379 get foo-2
+            check:
+              ($stdout=='bar-2'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli -c -p 6379 get foo-3
+            check:
+              ($stdout=='bar-3'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli -c -p 6379 get foo-4
+            check:
+              ($stdout=='bar-4'): true
+        - script:
+            timeout: 30s
+            content: >
+              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli -c -p 6379 get foo-5
+            check:
+              ($stdout=='bar-5'): true
diff --git a/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/cluster-hscale.yaml b/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/cluster-hscale.yaml
new file mode 100644
index 000000000..aec22cf24
--- /dev/null
+++ b/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/cluster-hscale.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisCluster
+metadata:
+  name: redis-cluster-v1beta2
+spec:
+  clusterSize: 3
+  clusterVersion: v7
+  persistenceEnabled: false
+  podSecurityContext:
+    runAsUser: 1000
+    fsGroup: 1000
+  kubernetesConfig:
+    image: quay.io/opstree/redis:v7.0.12
+    imagePullPolicy: Always
+    resources:
+      requests:
+        cpu: 101m
+        memory: 128Mi
+      limits:
+        cpu: 101m
+        memory: 256Mi # increased from 128Mi to trigger a rolling update
+  storage:
+    volumeClaimTemplate:
+      spec:
+        accessModes: [ReadWriteOnce]
+        resources:
+          requests:
+            storage: 1Gi
+    nodeConfVolume: true
+    nodeConfVolumeClaimTemplate:
+      spec:
+        accessModes: [ReadWriteOnce]
+        resources:
+          requests:
+            storage: 1Gi
diff --git a/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/cluster.yaml b/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/cluster.yaml
new file mode 100644
index 000000000..5bf34bb88
--- /dev/null
+++ b/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/cluster.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisCluster
+metadata:
+  name: redis-cluster-v1beta2
+spec:
+  clusterSize: 3
+  clusterVersion: v7
+  persistenceEnabled: false
+  podSecurityContext:
+    runAsUser: 1000
+    fsGroup: 1000
+  kubernetesConfig:
+    image: quay.io/opstree/redis:v7.0.12
+    imagePullPolicy: Always
+    resources:
+      requests:
+        cpu: 101m
+        memory: 128Mi
+      limits:
+        cpu: 101m
+        memory: 128Mi
+  storage:
+    volumeClaimTemplate:
+      spec:
+        accessModes: [ReadWriteOnce]
+        resources:
+          requests:
+            storage: 1Gi
+    nodeConfVolume: true
+    nodeConfVolumeClaimTemplate:
+      spec:
+        accessModes: [ReadWriteOnce]
+        resources:
+          requests:
+            storage: 1Gi
diff --git a/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/ready-cluster.yaml b/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/ready-cluster.yaml
new file mode 100644
index 000000000..a0708b5a5
--- /dev/null
+++ b/tests/e2e-chainsaw/v1beta2/disable-persistence/redis-cluster/ready-cluster.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisCluster
+metadata:
+  name: redis-cluster-v1beta2
+status:
+  readyFollowerReplicas: 3
+  readyLeaderReplicas: 3
+  state: Ready
+  reason: RedisCluster is ready
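
Note on the sequencing: as the controller hunk shows, follower reconciliation is gated on r.IsStatefulSetReady(ctx, ..., "-leader"), so a spec change must fully roll out the leader StatefulSet (all replicas updated, revisions converged, latest generation observed) before the follower StatefulSet is touched. The sketch below is one possible unit test for the new helper; it is not part of the patch, assumes the fake clientset from k8s.io/client-go and logr.Discard(), and uses purely illustrative fixture values.

package k8sutils

import (
	"context"
	"testing"

	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/utils/ptr"
)

func TestIsStatefulSetReady(t *testing.T) {
	// Hypothetical fixture whose status matches its spec: 3/3 replicas
	// updated, revisions converged, and the latest generation observed.
	sts := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "redis-cluster-v1beta2-leader",
			Namespace:  "default",
			Generation: 2,
		},
		Spec: appsv1.StatefulSetSpec{
			Replicas: ptr.To(int32(3)),
		},
		Status: appsv1.StatefulSetStatus{
			ObservedGeneration: 2,
			UpdatedReplicas:    3,
			CurrentRevision:    "rev-a",
			UpdateRevision:     "rev-a",
		},
	}
	s := NewStatefulSetService(fake.NewSimpleClientset(sts), logr.Discard())
	if !s.IsStatefulSetReady(context.TODO(), "default", "redis-cluster-v1beta2-leader") {
		t.Fatal("expected converged StatefulSet to be reported ready")
	}

	// A spec change the StatefulSet controller has not observed yet
	// (stale ObservedGeneration) must report not-ready.
	sts.Generation = 3
	s = NewStatefulSetService(fake.NewSimpleClientset(sts), logr.Discard())
	if s.IsStatefulSetReady(context.TODO(), "default", "redis-cluster-v1beta2-leader") {
		t.Fatal("expected StatefulSet with unobserved generation to be reported not ready")
	}
}

The second case mirrors what the reconciler sees mid-rollout: the leader's generation has moved but its status has not caught up, so the follower update stays blocked until the leader converges.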