diff --git a/ci/pull_e2e_kind.groovy b/ci/pull_e2e_kind.groovy
index 75610102b7e..bdd49be7c1d 100644
--- a/ci/pull_e2e_kind.groovy
+++ b/ci/pull_e2e_kind.groovy
@@ -181,7 +181,7 @@ def build(String name, String code, Map resources = e2ePodResources) {
 				unstash 'tidb-operator'
 				stage("Debug Info") {
 					println "debug host: 172.16.5.15"
-					println "debug command: kubectl -n jenkins-ci exec -ti ${NODE_NAME} bash"
+					println "debug command: kubectl -n jenkins-tidb exec -ti ${NODE_NAME} bash"
 					sh """
 					echo "====== shell env ======"
 					echo "pwd: \$(pwd)"
diff --git a/ci/pull_e2e_release.groovy b/ci/pull_e2e_release.groovy
index 40f578beaa2..c75af1938fa 100644
--- a/ci/pull_e2e_release.groovy
+++ b/ci/pull_e2e_release.groovy
@@ -177,7 +177,7 @@ def build(String name, String code, Map resources = e2ePodResources) {
 				unstash 'tidb-operator'
 				stage("Debug Info") {
 					println "debug host: 172.16.5.15"
-					println "debug command: kubectl -n jenkins-ci exec -ti ${NODE_NAME} bash"
+					println "debug command: kubectl -n jenkins-tidb exec -ti ${NODE_NAME} bash"
 					sh """
 					echo "====== shell env ======"
 					echo "pwd: \$(pwd)"
diff --git a/pkg/manager/member/utils.go b/pkg/manager/member/utils.go
index 156232f3c24..b9fb0e6f753 100644
--- a/pkg/manager/member/utils.go
+++ b/pkg/manager/member/utils.go
@@ -520,7 +520,7 @@ func addDeferDeletingAnnoToPVC(tc *v1alpha1.TidbCluster, pvc *corev1.PersistentV
 		klog.Errorf("failed to set PVC %s/%s annotation %q to %q", tc.Namespace, pvc.Name, label.AnnPVCDeferDeleting, now)
 		return err
 	}
-	klog.Infof("set PVC %s/%s annotationq %q to %q successfully", tc.Namespace, pvc.Name, label.AnnPVCDeferDeleting, now)
+	klog.Infof("set PVC %s/%s annotation %q to %q successfully", tc.Namespace, pvc.Name, label.AnnPVCDeferDeleting, now)
 	return nil
 }
 
diff --git a/tests/actions.go b/tests/actions.go
index 3545181e259..da97657fdf1 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -649,7 +649,7 @@ func (oa *OperatorActions) DeployDMTiDBOrDie() {
 		slack.NotifyAndPanic(err)
 	}
 
-	tc := fixture.GetTidbCluster(DMTiDBNamespace, DMTiDBName, utilimage.TiDBV4)
+	tc := fixture.GetTidbCluster(DMTiDBNamespace, DMTiDBName, utilimage.TiDBV5)
 	tc.Spec.PD.Replicas = 1
 	tc.Spec.TiKV.Replicas = 1
 	tc.Spec.TiDB.Replicas = 1
diff --git a/tests/e2e/tidbcluster/serial.go b/tests/e2e/tidbcluster/serial.go
index 79a13b61231..42fab00e631 100644
--- a/tests/e2e/tidbcluster/serial.go
+++ b/tests/e2e/tidbcluster/serial.go
@@ -147,11 +147,11 @@ var _ = ginkgo.Describe("[Serial]", func() {
 			oa.CleanCRDOrDie()
 		})
 
-		ginkgo.It(fmt.Sprintf("should be able to upgrade TiDB Cluster from %s to %s", utilimage.TiDBV4Prev, utilimage.TiDBV4), func() {
+		ginkgo.It(fmt.Sprintf("should be able to upgrade TiDB Cluster from %s to %s", utilimage.TiDBV5Prev, utilimage.TiDBV5), func() {
 			log.Logf("start to upgrade tidbcluster with pod admission webhook")
 			// deploy new cluster and test upgrade and scale-in/out with pod admission webhook
-			ginkgo.By(fmt.Sprintf("start initial TidbCluster %q", utilimage.TiDBV4Prev))
-			tc := fixture.GetTidbCluster(ns, "admission", utilimage.TiDBV4Prev)
+			ginkgo.By(fmt.Sprintf("start initial TidbCluster %q", utilimage.TiDBV5Prev))
+			tc := fixture.GetTidbCluster(ns, "admission", utilimage.TiDBV5Prev)
 			tc.Spec.PD.Replicas = 3
 			tc.Spec.TiKV.Replicas = 3
 			tc.Spec.TiDB.Replicas = 2
@@ -164,25 +164,25 @@ var _ = ginkgo.Describe("[Serial]", func() {
 			err = setPartitionAnnotation(ns, tc.Name, label.TiKVLabelVal, 1)
 			framework.ExpectNoError(err, "set tikv Partition annotation failed")
 
-			ginkgo.By(fmt.Sprintf("Upgrade TidbCluster version to %q", utilimage.TiDBV4))
+			ginkgo.By(fmt.Sprintf("Upgrade TidbCluster version to %q", utilimage.TiDBV5))
 			err = controller.GuaranteedUpdate(genericCli, tc, func() error {
-				tc.Spec.Version = utilimage.TiDBV4
+				tc.Spec.Version = utilimage.TiDBV5
 				return nil
 			})
-			framework.ExpectNoError(err, "failed to update TidbCluster to upgrade tidb version to %v", utilimage.TiDBV4)
+			framework.ExpectNoError(err, "failed to update TidbCluster to upgrade tidb version to %v", utilimage.TiDBV5)
 
-			ginkgo.By(fmt.Sprintf("wait for tikv-1 pod upgrading to %q", utilimage.TiDBV4))
+			ginkgo.By(fmt.Sprintf("wait for tikv-1 pod upgrading to %q", utilimage.TiDBV5))
 			err = wait.Poll(5*time.Second, 10*time.Minute, func() (done bool, err error) {
 				tikvPod, err := c.CoreV1().Pods(ns).Get(fmt.Sprintf("%s-tikv-1", tc.Name), metav1.GetOptions{})
 				if err != nil {
 					return false, nil
 				}
-				if tikvPod.Spec.Containers[0].Image != fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV4) {
+				if tikvPod.Spec.Containers[0].Image != fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV5) {
 					return false, nil
 				}
 				return true, nil
 			})
-			framework.ExpectNoError(err, "failed to upgrade tikv-1 to %q", utilimage.TiDBV4)
+			framework.ExpectNoError(err, "failed to upgrade tikv-1 to %q", utilimage.TiDBV5)
 
 			ginkgo.By("Wait to see if tikv sts partition annotation remains 1 for 3 min")
 			// TODO: explain the purpose of this testing
@@ -260,13 +260,13 @@ var _ = ginkgo.Describe("[Serial]", func() {
 					TiDB: &v1alpha1.TiDBSpec{
 						Replicas: 1,
 						ComponentSpec: v1alpha1.ComponentSpec{
-							Image: fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV4Prev),
+							Image: fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV5Prev),
 						},
 					},
 					TiKV: &v1alpha1.TiKVSpec{
 						Replicas: 1,
 						ComponentSpec: v1alpha1.ComponentSpec{
-							Image: fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV4Prev),
+							Image: fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV5Prev),
 						},
 						ResourceRequirements: v1.ResourceRequirements{
 							Requests: v1.ResourceList{
@@ -277,7 +277,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
 					PD: &v1alpha1.PDSpec{
 						Replicas: 1,
 						ComponentSpec: v1alpha1.ComponentSpec{
-							Image: fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV4Prev),
+							Image: fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV5Prev),
 						},
 						ResourceRequirements: v1.ResourceRequirements{
 							Requests: v1.ResourceList{
@@ -297,7 +297,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
 
 			oa.UpgradeOperatorOrDie(ocfg) // now the webhook enabled
 			err = controller.GuaranteedUpdate(genericCli, legacyTc, func() error {
-				legacyTc.Spec.TiDB.Image = fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV4)
+				legacyTc.Spec.TiDB.Image = fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV5)
 				return nil
 			})
 			framework.ExpectNoError(err, "Update legacy TidbCluster should not be influenced by validating")
@@ -308,7 +308,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
 				legacyTc.Spec.TiDB.BaseImage = "pingcap/tidb"
 				legacyTc.Spec.TiKV.BaseImage = "pingcap/tikv"
 				legacyTc.Spec.PD.BaseImage = "pingcap/pd"
-				legacyTc.Spec.PD.Version = pointer.StringPtr(utilimage.TiDBV4)
+				legacyTc.Spec.PD.Version = pointer.StringPtr(utilimage.TiDBV5)
 				return nil
 			})
 			framework.ExpectNoError(err, "failed to update TidbCluster")
@@ -331,12 +331,12 @@ var _ = ginkgo.Describe("[Serial]", func() {
 				Spec: v1alpha1.TidbClusterSpec{
 					TiDB: &v1alpha1.TiDBSpec{
 						ComponentSpec: v1alpha1.ComponentSpec{
-							Image: fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV4),
+							Image: fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV5),
 						},
 					},
 					TiKV: &v1alpha1.TiKVSpec{
 						ComponentSpec: v1alpha1.ComponentSpec{
-							Image: fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV4),
+							Image: fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV5),
 						},
 						ResourceRequirements: v1.ResourceRequirements{
 							Requests: v1.ResourceList{
@@ -346,7 +346,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
 					},
 					PD: &v1alpha1.PDSpec{
 						ComponentSpec: v1alpha1.ComponentSpec{
-							Image: fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV4),
+							Image: fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV5),
 						},
 						ResourceRequirements: v1.ResourceRequirements{
 							Requests: v1.ResourceList{
@@ -366,7 +366,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
 					Name: "newly-created",
 				},
 				Spec: v1alpha1.TidbClusterSpec{
-					Version: utilimage.TiDBV4,
+					Version: utilimage.TiDBV5,
 					TiDB: &v1alpha1.TiDBSpec{
 						Replicas: 1,
 					},
@@ -446,9 +446,9 @@ var _ = ginkgo.Describe("[Serial]", func() {
 		})
 
 		ginkgo.It("should not change old TidbCluster", func() {
-			ginkgo.By(fmt.Sprintf("deploy original tc %q", utilimage.TiDBV4))
+			ginkgo.By(fmt.Sprintf("deploy original tc %q", utilimage.TiDBV5))
 			tcName := "tidbcluster"
-			tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4)
+			tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5)
 			tc.Spec.PD.Replicas = 3
 			tc.Spec.TiKV.Replicas = 1
 			tc.Spec.TiDB.Replicas = 1
@@ -517,7 +517,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
 		ginkgo.It("should migrate tidbmonitor from deployment to sts", func() {
 			ginkgo.By("deploy initial tc")
 			tcName := "smooth-tidbcluster"
-			tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4)
+			tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5)
 			tc.Spec.PD.Replicas = 1
 			tc.Spec.TiKV.Replicas = 1
 			tc.Spec.TiDB.Replicas = 1
@@ -607,9 +607,9 @@ var _ = ginkgo.Describe("[Serial]", func() {
 		})
 
 		ginkgo.It("Deploy TidbCluster and check the result", func() {
-			ginkgo.By(fmt.Sprintf("deploy original tc %q", utilimage.TiDBV4Prev))
+			ginkgo.By(fmt.Sprintf("deploy original tc %q", utilimage.TiDBV5Prev))
 			tcName := "tidbcluster1"
-			tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4Prev)
+			tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5Prev)
 			tc.Spec.PD.Replicas = 1
 			tc.Spec.TiKV.Replicas = 1
 			tc.Spec.TiDB.Replicas = 1
@@ -629,10 +629,10 @@ var _ = ginkgo.Describe("[Serial]", func() {
 
 			ginkgo.By("Upgrade TidbCluster 1 version, wait for 2 minutes, check that no rolling update occurs")
 			err = controller.GuaranteedUpdate(genericCli, tc, func() error {
-				tc.Spec.Version = utilimage.TiDBV4
+				tc.Spec.Version = utilimage.TiDBV5
 				return nil
 			})
-			framework.ExpectNoError(err, "failed to update TidbCluster 1 to upgrade PD version to %v", utilimage.TiDBV4)
+			framework.ExpectNoError(err, "failed to update TidbCluster 1 to upgrade PD version to %v", utilimage.TiDBV5)
 
 			err = wait.Poll(5*time.Second, 2*time.Minute, func() (done bool, err error) {
 				// confirm the TidbCluster 1 PD haven't been changed
@@ -675,7 +675,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
 
 			ginkgo.By("Deploy TidbCluster 2 with label version=new")
 			tc2Name := "tidbcluster2"
-			tc2 := fixture.GetTidbCluster(ns, tc2Name, utilimage.TiDBV4Prev)
+			tc2 := fixture.GetTidbCluster(ns, tc2Name, utilimage.TiDBV5Prev)
 			tc2.Spec.PD.Replicas = 1
 			tc2.Spec.TiKV.Replicas = 1
 			tc2.Spec.TiDB.Replicas = 1
@@ -726,27 +726,27 @@ var _ = ginkgo.Describe("[Serial]", func() {
 
 			ginkgo.By("Upgrade TiDB version of TidbCluster 2")
 			err = controller.GuaranteedUpdate(genericCli, tc2, func() error {
-				tc2.Spec.Version = utilimage.TiDBV4
+				tc2.Spec.Version = utilimage.TiDBV5
 				return nil
 			})
-			framework.ExpectNoError(err, "failed to update TidbCluster 2 to upgrade tidb version to %v", utilimage.TiDBV4)
+			framework.ExpectNoError(err, "failed to update TidbCluster 2 to upgrade tidb version to %v", utilimage.TiDBV5)
 			log.Logf("Finished upgrading TidbCluster 2")
 
 			err = oa.WaitForTidbClusterReady(tc2, 10*time.Minute, 10*time.Second)
 			framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", ns, tc2.Name)
 
-			ginkgo.By(fmt.Sprintf("wait for TidbCluster 2 pd-0 pod upgrading to %q", utilimage.TiDBV4))
+			ginkgo.By(fmt.Sprintf("wait for TidbCluster 2 pd-0 pod upgrading to %q", utilimage.TiDBV5))
 			err = wait.Poll(5*time.Second, 10*time.Minute, func() (done bool, err error) {
 				pdPod, err := c.CoreV1().Pods(ns).Get(fmt.Sprintf("%s-pd-0", tc2.Name), metav1.GetOptions{})
 				if err != nil {
 					return false, nil
 				}
-				if pdPod.Spec.Containers[0].Image != fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV4) {
+				if pdPod.Spec.Containers[0].Image != fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV5) {
 					return false, nil
 				}
 				return true, nil
 			})
-			framework.ExpectNoError(err, "failed to upgrade TidbCluster 2 pd-0 to %q", utilimage.TiDBV4)
+			framework.ExpectNoError(err, "failed to upgrade TidbCluster 2 pd-0 to %q", utilimage.TiDBV5)
 			log.Logf("Finished upgrading TidbCluster 2")
 
 			ginkgo.By("Deploy the default TiDB Operator with --selector=version=old")
@@ -818,7 +818,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
 			framework.Skipf("duplicated test")
 
 			tcName := "basic"
-			tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4)
+			tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5)
 			tc.Spec.PD.Replicas = 1
 			tc.Spec.TiKV.Replicas = 1
 			tc.Spec.TiDB.Replicas = 1
diff --git a/tests/e2e/tidbcluster/stability-asts.go b/tests/e2e/tidbcluster/stability-asts.go
index c261d427dd4..0636297599f 100644
--- a/tests/e2e/tidbcluster/stability-asts.go
+++ b/tests/e2e/tidbcluster/stability-asts.go
@@ -141,7 +141,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 
 	ginkgo.It("Scaling tidb cluster with advanced statefulset", func() {
 		clusterName := "scaling-with-asts"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc = fixture.AddTiFlashForTidbCluster(tc)
 		tc.Spec.PD.Replicas = 3
 		tc.Spec.TiKV.Replicas = 5
@@ -346,7 +346,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 			oa.CleanCRDOrDie()
 		}()
 
-		tc := fixture.GetTidbCluster(ns, "sts", utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, "sts", utilimage.TiDBV5)
 		err = genericCli.Create(context.TODO(), tc)
 		framework.ExpectNoError(err, "failed to create TidbCluster: %v", tc)
 		err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
@@ -438,7 +438,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 			oa.CleanCRDOrDie()
 		}()
 
-		tc := fixture.GetTidbCluster(ns, "upgrade-cluster", utilimage.TiDBV4Prev)
+		tc := fixture.GetTidbCluster(ns, "upgrade-cluster", utilimage.TiDBV5Prev)
 		tc.Spec.PD.Replicas = 5
 		tc.Spec.TiKV.Replicas = 4
 		tc.Spec.TiDB.Replicas = 3
@@ -469,7 +469,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 
 		ginkgo.By("Upgrading the cluster")
 		err = controller.GuaranteedUpdate(genericCli, tc, func() error {
-			tc.Spec.Version = utilimage.TiDBV4
+			tc.Spec.Version = utilimage.TiDBV5
 			return nil
 		})
 		framework.ExpectNoError(err, "failed to upgrade TidbCluster %s/%s", ns, tc.Name)
diff --git a/tests/e2e/tidbcluster/stability-br.go b/tests/e2e/tidbcluster/stability-br.go
index f11427bed94..c843b5be3f8 100644
--- a/tests/e2e/tidbcluster/stability-br.go
+++ b/tests/e2e/tidbcluster/stability-br.go
@@ -196,7 +196,7 @@ func testBR(provider, ns string, fw portforward.PortForward, c clientset.Interfa
 	}
 
 	// create backup cluster
-	tcFrom := fixture.GetTidbCluster(ns, tcNameFrom, utilimage.TiDBV4)
+	tcFrom := fixture.GetTidbCluster(ns, tcNameFrom, utilimage.TiDBV5)
 	tcFrom.Spec.PD.Replicas = 1
 	tcFrom.Spec.TiKV.Replicas = 1
 	tcFrom.Spec.TiDB.Replicas = 1
@@ -207,7 +207,7 @@ func testBR(provider, ns string, fw portforward.PortForward, c clientset.Interfa
 	framework.ExpectNoError(err, "failed to create TidbCluster tcFrom: %v", tcFrom)
 
 	// create restore cluster
-	tcTo := fixture.GetTidbCluster(ns, tcNameTo, utilimage.TiDBV4)
+	tcTo := fixture.GetTidbCluster(ns, tcNameTo, utilimage.TiDBV5)
 	tcTo.Spec.PD.Replicas = 1
 	tcTo.Spec.TiKV.Replicas = 1
 	tcTo.Spec.TiDB.Replicas = 1
@@ -220,11 +220,11 @@ func testBR(provider, ns string, fw portforward.PortForward, c clientset.Interfa
 	// wait both tidbcluster ready
 	err = oa.WaitForTidbClusterReady(tcFrom, 30*time.Minute, 15*time.Second)
 	framework.ExpectNoError(err, "failed to wait for TidbCluster tcFrom ready")
-	clusterFrom := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameFrom, "", utilimage.TiDBV4)
+	clusterFrom := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameFrom, "", utilimage.TiDBV5)
 
 	err = oa.WaitForTidbClusterReady(tcTo, 30*time.Minute, 15*time.Second)
 	framework.ExpectNoError(err, "failed to wait for TidbCluster tcTo ready")
-	clusterTo := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameTo, "", utilimage.TiDBV4)
+	clusterTo := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameTo, "", utilimage.TiDBV5)
 
 	// import some data to sql with blockwriter
 	ginkgo.By(fmt.Sprintf("Begin inserting data into cluster %q", clusterFrom.ClusterName))
diff --git a/tests/e2e/tidbcluster/stability.go b/tests/e2e/tidbcluster/stability.go
index f1b22250d3e..7fd65cd457e 100644
--- a/tests/e2e/tidbcluster/stability.go
+++ b/tests/e2e/tidbcluster/stability.go
@@ -161,7 +161,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 	for _, test := range testCases {
 		ginkgo.It("tidb cluster should not be affected while "+test.name, func() {
 			clusterName := "test"
-			tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+			tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 			utiltc.MustCreateTCWithComponentsReady(genericCli, oa, tc, 30*time.Minute, 15*time.Second)
 
 			test.fn()
@@ -287,7 +287,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 
 		ginkgo.By("Deploy a test cluster with 3 pd and tikv replicas")
 		clusterName := "test"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 3
 		tc.Spec.PD.MaxFailoverCount = pointer.Int32Ptr(0)
 		tc.Spec.TiDB.Replicas = 1
@@ -501,7 +501,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 	// See docs/design-proposals/tidb-stable-scheduling.md
 	ginkgo.It("[Feature: StableScheduling] TiDB pods should be scheduled to preivous nodes", func() {
 		clusterName := "tidb-scheduling"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 1
 		tc.Spec.TiKV.Replicas = 1
 		tc.Spec.TiDB.Replicas = 3
@@ -607,7 +607,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 		defer utilcloud.EnableNodeAutoRepair()
 		utilcloud.DisableNodeAutoRepair()
 		clusterName := "failover"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 3
 		tc.Spec.TiKV.Replicas = 1
 		tc.Spec.TiDB.Replicas = 1
@@ -695,7 +695,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 		gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">=", 3))
 
 		clusterName := "failover"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 1
 		tc.Spec.TiKV.Replicas = 1
 		tc.Spec.TiDB.Replicas = 2
@@ -862,7 +862,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 	// TODO: this should be a regression type
 	ginkgo.It("[Feature: AutoFailover] Failover can work if a store fails to update", func() {
 		clusterName := "scale"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 1
 		// By default, PD set the state of disconnected store to Down
 		// after 30 minutes. Use a short time in testing.
@@ -922,7 +922,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 	// TODO: this should be a regression type
 	ginkgo.It("[Feature: AutoFailover] Failover can work if a pd fails to update", func() {
 		clusterName := "scale"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 3
 		tc.Spec.TiKV.Replicas = 1
 		tc.Spec.TiDB.Replicas = 1
@@ -1019,7 +1019,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
 		gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">=", 3))
 
 		clusterName := "failover"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc.Spec.SchedulerName = ""
 		tc.Spec.PD.Replicas = 1
 		tc.Spec.PD.Config.Set("schedule.max-store-down-time", "1m")
diff --git a/tests/e2e/tidbcluster/tidbcluster.go b/tests/e2e/tidbcluster/tidbcluster.go
index 174e82a623b..05c0ca5770a 100644
--- a/tests/e2e/tidbcluster/tidbcluster.go
+++ b/tests/e2e/tidbcluster/tidbcluster.go
@@ -132,7 +132,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 	// basic deploy, scale out, scale in, change configuration tests
 	ginkgo.Describe("when using version", func() {
-		versions := []string{utilimage.TiDBV3, utilimage.TiDBV4}
+		versions := []string{utilimage.TiDBV3, utilimage.TiDBV4, utilimage.TiDBV5}
 		for _, version := range versions {
 			version := version
 			versionDashed := strings.ReplaceAll(version, ".", "-")
@@ -253,7 +253,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 		ginkgo.By("Deploy initial tc")
 		clusterName := "host-network"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		// Set some properties
 		tc.Spec.PD.Replicas = 1
 		tc.Spec.TiKV.Replicas = 1
@@ -299,14 +299,14 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 		ginkgo.By("Deploying tidb cluster")
 		clusterName := "webhook-upgrade-cluster"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4Prev)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5Prev)
 		tc.Spec.PD.Replicas = 3
 		// Deploy
 		utiltc.MustCreateTCWithComponentsReady(genericCli, oa, tc, 6*time.Minute, 5*time.Second)
 
-		ginkgo.By(fmt.Sprintf("Upgrading tidb cluster from %s to %s", tc.Spec.Version, utilimage.TiDBV4))
+		ginkgo.By(fmt.Sprintf("Upgrading tidb cluster from %s to %s", tc.Spec.Version, utilimage.TiDBV5))
 		err = controller.GuaranteedUpdate(genericCli, tc, func() error {
-			tc.Spec.Version = utilimage.TiDBV4
+			tc.Spec.Version = utilimage.TiDBV5
 			return nil
 		})
 		framework.ExpectNoError(err, "failed to upgrade TidbCluster: %q", tc.Name)
@@ -330,7 +330,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	ginkgo.Context("[Feature: Helm Chart migrate to CR]", func() {
 		ginkgo.It("should keep tidb service in sync", func() {
 			ginkgo.By("Deploy initial tc")
-			tcCfg := newTidbClusterConfig(e2econfig.TestConfig, ns, "service", "admin", utilimage.TiDBV4)
+			tcCfg := newTidbClusterConfig(e2econfig.TestConfig, ns, "service", "admin", utilimage.TiDBV5)
 			tcCfg.Resources["pd.replicas"] = "1"
 			tcCfg.Resources["tidb.replicas"] = "1"
 			tcCfg.Resources["tikv.replicas"] = "1"
@@ -419,7 +419,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		// TODO: Add pump configmap rolling-update case
 		ginkgo.It("should adopt helm created pump with TidbCluster CR", func() {
 			ginkgo.By("Deploy initial tc")
-			tcCfg := newTidbClusterConfig(e2econfig.TestConfig, ns, "pump", "admin", utilimage.TiDBV4)
+			tcCfg := newTidbClusterConfig(e2econfig.TestConfig, ns, "pump", "admin", utilimage.TiDBV5)
 			tcCfg.Resources["pd.replicas"] = "1"
 			tcCfg.Resources["tikv.replicas"] = "1"
 			tcCfg.Resources["tidb.replicas"] = "1"
@@ -556,7 +556,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 		ginkgo.It("should migrate from helm to CR", func() {
 			ginkgo.By("Deploy initial tc")
-			tcCfg := newTidbClusterConfig(e2econfig.TestConfig, ns, "helm-migration", "admin", utilimage.TiDBV4)
+			tcCfg := newTidbClusterConfig(e2econfig.TestConfig, ns, "helm-migration", "admin", utilimage.TiDBV5)
 			tcCfg.Resources["pd.replicas"] = "1"
 			tcCfg.Resources["tikv.replicas"] = "1"
 			tcCfg.Resources["tidb.replicas"] = "1"
@@ -614,6 +614,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 			tc.Spec.TiDB.Config = v1alpha1.NewTiDBConfig()
 			tc.Spec.TiDB.ConfigUpdateStrategy = &updateStrategy
 			tc.Spec.TiKV.Config = v1alpha1.NewTiKVConfig()
+			tc.Spec.TiKV.Config.Set("storage.reserve-space", "0MB")
 			tc.Spec.TiKV.ConfigUpdateStrategy = &updateStrategy
 			tc.Spec.PD.Config = v1alpha1.NewPDConfig()
 			tc.Spec.PD.ConfigUpdateStrategy = &updateStrategy
@@ -670,7 +671,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	// TODO: move into TiDBMonitor specific group
 	ginkgo.It("should manage tidb monitor normally", func() {
 		ginkgo.By("Deploy initial tc")
-		tc := fixture.GetTidbCluster(ns, "monitor-test", utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, "monitor-test", utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 1
 		tc.Spec.TiKV.Replicas = 1
 		tc.Spec.TiDB.Replicas = 1
@@ -827,7 +828,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	ginkgo.It("can be paused and resumed", func() {
 		ginkgo.By("Deploy initial tc")
 		tcName := "paused"
-		tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4Prev)
+		tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5Prev)
 		tc.Spec.PD.Replicas = 1
 		tc.Spec.TiKV.Replicas = 1
 		tc.Spec.TiDB.Replicas = 1
@@ -843,9 +844,9 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		})
 		framework.ExpectNoError(err, "failed to pause TidbCluster: %q", tc.Name)
 
-		ginkgo.By(fmt.Sprintf("upgrade tc version to %q", utilimage.TiDBV4))
+		ginkgo.By(fmt.Sprintf("upgrade tc version to %q", utilimage.TiDBV5))
 		err = controller.GuaranteedUpdate(genericCli, tc, func() error {
-			tc.Spec.Version = utilimage.TiDBV4
+			tc.Spec.Version = utilimage.TiDBV5
 			return nil
 		})
 		framework.ExpectNoError(err, "failed to upgrade TidbCluster version: %q", tc.Name)
@@ -890,7 +891,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	// TODO: explain purpose of this case
 	ginkgo.It("should clear TiDB failureMembers when scale TiDB to zero", func() {
 		ginkgo.By("Deploy initial tc")
-		tc := fixture.GetTidbCluster(ns, "tidb-scale", utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, "tidb-scale", utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 1
 		tc.Spec.TiKV.Replicas = 1
 		tc.Spec.TiDB.Replicas = 1
@@ -973,7 +974,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 		ginkgo.By("Creating tidb cluster with TLS enabled")
 		dashTLSName := fmt.Sprintf("%s-dashboard-tls", tcName)
-		tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4Prev)
+		tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5Prev)
 		tc.Spec.PD.Replicas = 3
 		tc.Spec.PD.TLSClientSecretName = &dashTLSName
 		tc.Spec.TiKV.Replicas = 3
@@ -1022,10 +1023,10 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 			Namespace:      ns,
 			ClusterName:    tcName,
 			OperatorTag:    cfg.OperatorTag,
-			ClusterVersion: utilimage.TiDBV4,
+			ClusterVersion: utilimage.TiDBV5,
 		}
 		targetTcName := "tls-target"
-		targetTc := fixture.GetTidbCluster(ns, targetTcName, utilimage.TiDBV4)
+		targetTc := fixture.GetTidbCluster(ns, targetTcName, utilimage.TiDBV5)
 		targetTc.Spec.PD.Replicas = 1
 		targetTc.Spec.TiKV.Replicas = 1
 		targetTc.Spec.TiDB.Replicas = 1
@@ -1089,7 +1090,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 		ginkgo.By("Upgrading tidb cluster")
 		err = controller.GuaranteedUpdate(genericCli, tc, func() error {
-			tc.Spec.Version = utilimage.TiDBV4
+			tc.Spec.Version = utilimage.TiDBV5
 			return nil
 		})
 		framework.ExpectNoError(err, "failed to update TidbCluster: %q", tc.Name)
@@ -1129,7 +1130,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 		ginkgo.By("Creating tidb cluster")
 		dashTLSName := fmt.Sprintf("%s-dashboard-tls", tcName)
-		tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 1
 		tc.Spec.PD.TLSClientSecretName = &dashTLSName
 		tc.Spec.TiKV.Replicas = 1
@@ -1150,9 +1151,8 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		framework.ExpectNoError(err, "wait for TidbCluster ready timeout: %q", tc.Name)
 
 		ginkgo.By("Creating heterogeneous tidb cluster")
-		heterogeneousTc := fixture.GetTidbCluster(ns, heterogeneousTcName, utilimage.TiDBV4)
+		heterogeneousTc := fixture.GetTidbCluster(ns, heterogeneousTcName, utilimage.TiDBV5)
 		heterogeneousTc = fixture.AddTiFlashForTidbCluster(heterogeneousTc)
-
 		heterogeneousTc.Spec.PD = nil
 		heterogeneousTc.Spec.TiKV.Replicas = 1
 		heterogeneousTc.Spec.TiDB.Replicas = 1
@@ -1222,10 +1222,10 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 			Namespace:      ns,
 			ClusterName:    tcName,
 			OperatorTag:    cfg.OperatorTag,
-			ClusterVersion: utilimage.TiDBV4,
+			ClusterVersion: utilimage.TiDBV5,
 		}
 		targetTcName := "tls-target"
-		targetTc := fixture.GetTidbCluster(ns, targetTcName, utilimage.TiDBV4)
+		targetTc := fixture.GetTidbCluster(ns, targetTcName, utilimage.TiDBV5)
 		targetTc.Spec.PD.Replicas = 1
 		targetTc.Spec.TiKV.Replicas = 1
 		targetTc.Spec.TiDB.Replicas = 1
@@ -1271,7 +1271,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	ginkgo.It("should ensure changing TiDB service annotations won't change TiDB service type NodePort", func() {
 		ginkgo.By("Deploy initial tc")
 		// Create TidbCluster with NodePort to check whether node port would change
-		nodeTc := fixture.GetTidbCluster(ns, "nodeport", utilimage.TiDBV4)
+		nodeTc := fixture.GetTidbCluster(ns, "nodeport", utilimage.TiDBV5)
 		nodeTc.Spec.PD.Replicas = 1
 		nodeTc.Spec.TiKV.Replicas = 1
 		nodeTc.Spec.TiDB.Replicas = 1
@@ -1367,7 +1367,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	ginkgo.It("should join heterogeneous cluster into an existing cluster", func() {
 		// Create TidbCluster with NodePort to check whether node port would change
 		ginkgo.By("Deploy origin tc")
-		originTc := fixture.GetTidbCluster(ns, "origin", utilimage.TiDBV4)
+		originTc := fixture.GetTidbCluster(ns, "origin", utilimage.TiDBV5)
 		originTc.Spec.PD.Replicas = 1
 		originTc.Spec.TiKV.Replicas = 1
 		originTc.Spec.TiDB.Replicas = 1
@@ -1377,7 +1377,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		framework.ExpectNoError(err, "Expected TiDB cluster ready")
 
 		ginkgo.By("Deploy heterogeneous tc")
-		heterogeneousTc := fixture.GetTidbCluster(ns, "heterogeneous", utilimage.TiDBV4)
+		heterogeneousTc := fixture.GetTidbCluster(ns, "heterogeneous", utilimage.TiDBV5)
 		heterogeneousTc = fixture.AddTiFlashForTidbCluster(heterogeneousTc)
 
 		heterogeneousTc.Spec.PD = nil
@@ -1420,7 +1420,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	ginkgo.It("[Feature: CDC]", func() {
 		ginkgo.By("Creating cdc cluster")
-		fromTc := fixture.GetTidbCluster(ns, "cdc-source", utilimage.TiDBV4)
+		fromTc := fixture.GetTidbCluster(ns, "cdc-source", utilimage.TiDBV5)
 		fromTc = fixture.AddTiCDCForTidbCluster(fromTc)
 
 		fromTc.Spec.PD.Replicas = 3
@@ -1434,7 +1434,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		framework.ExpectNoError(err, "Expected TiDB cluster ready")
 
 		ginkgo.By("Creating cdc-sink cluster")
-		toTc := fixture.GetTidbCluster(ns, "cdc-sink", utilimage.TiDBV4)
+		toTc := fixture.GetTidbCluster(ns, "cdc-sink", utilimage.TiDBV5)
 		toTc.Spec.PD.Replicas = 1
 		toTc.Spec.TiKV.Replicas = 1
 		toTc.Spec.TiDB.Replicas = 1
@@ -1471,7 +1471,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	ginkgo.It("TiKV should mount multiple pvc", func() {
 		ginkgo.By("Deploy initial tc with addition")
 		clusterName := "tidb-multiple-pvc-scale"
-		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
 		tc.Spec.TiKV.StorageVolumes = []v1alpha1.StorageVolume{
 			{
 				Name: "wal",
@@ -1537,7 +1537,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 	ginkgo.Context("upgrade should work correctly", func() {
 		ginkgo.It("for tc and components version", func() {
 			ginkgo.By("Deploy initial tc")
-			tc := fixture.GetTidbCluster(ns, "upgrade-version", utilimage.TiDBV4Prev)
+			tc := fixture.GetTidbCluster(ns, "upgrade-version", utilimage.TiDBV5Prev)
 			pvRetain := corev1.PersistentVolumeReclaimRetain
 			tc.Spec.PVReclaimPolicy = &pvRetain
 			tc.Spec.PD.StorageClassName = pointer.StringPtr("local-storage")
@@ -1547,15 +1547,15 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 			ginkgo.By("Update tc version")
 			err := controller.GuaranteedUpdate(genericCli, tc, func() error {
-				tc.Spec.Version = utilimage.TiDBV4
+				tc.Spec.Version = utilimage.TiDBV5
 				return nil
 			})
-			framework.ExpectNoError(err, "failed to update tc version to %q", utilimage.TiDBV4)
-			err = oa.WaitForTidbClusterReady(tc, 10*time.Minute, 10*time.Second)
+			framework.ExpectNoError(err, "failed to update tc version to %q", utilimage.TiDBV5)
+			err = oa.WaitForTidbClusterReady(tc, 15*time.Minute, 10*time.Second)
 			framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", ns, tc.Name)
 
 			ginkgo.By("Update components version")
-			componentVersion := utilimage.TiDBV4Prev
+			componentVersion := utilimage.TiDBV5Prev
 			err = controller.GuaranteedUpdate(genericCli, tc, func() error {
 				tc.Spec.PD.Version = pointer.StringPtr(componentVersion)
 				tc.Spec.TiKV.Version = pointer.StringPtr(componentVersion)
@@ -1563,7 +1563,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 				return nil
 			})
 			framework.ExpectNoError(err, "failed to update components version to %q", componentVersion)
-			err = oa.WaitForTidbClusterReady(tc, 10*time.Minute, 10*time.Second)
+			err = oa.WaitForTidbClusterReady(tc, 15*time.Minute, 10*time.Second)
 			framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", ns, tc.Name)
 
 			ginkgo.By("Check components version")
@@ -1589,7 +1589,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		ginkgo.It("for configuration update", func() {
 			ginkgo.By("Deploy initial tc")
-			tc := fixture.GetTidbCluster(ns, "update-config", utilimage.TiDBV4)
+			tc := fixture.GetTidbCluster(ns, "update-config", utilimage.TiDBV5)
 			utiltc.MustCreateTCWithComponentsReady(genericCli, oa, tc, 5*time.Minute, 10*time.Second)
 
 			ginkgo.By("Update components configuration")
@@ -1598,6 +1598,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 			tikvCfg := v1alpha1.NewTiKVConfig()
 			tidbCfg := v1alpha1.NewTiDBConfig()
 			pdCfg.Set("lease", 3)
+			tikvCfg.Set("storage.reserve-space", "0MB")
 			tikvCfg.Set("status-thread-pool-size", 1)
 			tidbCfg.Set("token-limit", 10000)
 			tc.Spec.PD.Config = pdCfg
@@ -1677,7 +1678,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 			ginkgo.It(op, func() {
 				ginkgo.By("Deploy initial tc")
 				tcName := fmt.Sprintf("scale-%s-pd-concurrently", op)
-				tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4Prev)
+				tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5Prev)
 				tc.Spec.PD.StorageClassName = pointer.StringPtr("local-storage")
 				if op == "in" {
 					tc.Spec.PD.Replicas = 5
@@ -1688,10 +1689,10 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 				ginkgo.By("Upgrade PD version")
 				err := controller.GuaranteedUpdate(genericCli, tc, func() error {
-					tc.Spec.PD.Version = pointer.StringPtr(utilimage.TiDBV4)
+					tc.Spec.PD.Version = pointer.StringPtr(utilimage.TiDBV5)
 					return nil
 				})
-				framework.ExpectNoError(err, "failed to update PD version to %q", utilimage.TiDBV4)
+				framework.ExpectNoError(err, "failed to update PD version to %q", utilimage.TiDBV5)
 
 				ginkgo.By(fmt.Sprintf("Wait for PD phase is %q", v1alpha1.UpgradePhase))
 				err = wait.PollImmediate(10*time.Second, 3*time.Minute, func() (bool, error) {
@@ -1740,7 +1741,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 			ginkgo.It(op, func() {
 				ginkgo.By("Deploy initial tc")
 				tcName := fmt.Sprintf("scale-%s-tikv-concurrently", op)
-				tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4Prev)
+				tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5Prev)
 				if op == "in" {
 					tc.Spec.TiKV.Replicas = 4
 				} else {
@@ -1750,10 +1751,10 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 				ginkgo.By("Upgrade TiKV version")
 				err := controller.GuaranteedUpdate(genericCli, tc, func() error {
-					tc.Spec.TiKV.Version = pointer.StringPtr(utilimage.TiDBV4)
+					tc.Spec.TiKV.Version = pointer.StringPtr(utilimage.TiDBV5)
 					return nil
 				})
-				framework.ExpectNoError(err, "failed to update TiKV version to %q", utilimage.TiDBV4)
+				framework.ExpectNoError(err, "failed to update TiKV version to %q", utilimage.TiDBV5)
 
 				ginkgo.By(fmt.Sprintf("Wait for TiKV phase is %q", v1alpha1.UpgradePhase))
 				err = wait.PollImmediate(10*time.Second, 3*time.Minute, func() (bool, error) {
@@ -1819,7 +1820,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 		ginkgo.It("with bad PD config, then recover after force upgrading PD", func() {
 			ginkgo.By("Deploy initial tc with incorrect PD image")
-			tc := fixture.GetTidbCluster(ns, "force-upgrade-pd", utilimage.TiDBV4Prev)
+			tc := fixture.GetTidbCluster(ns, "force-upgrade-pd", utilimage.TiDBV5Prev)
 			tc.Spec.PD.BaseImage = "wrong-pd-image"
 			err := genericCli.Create(context.TODO(), tc)
 			framework.ExpectNoError(err, "failed to create TidbCluster %s/%s", ns, tc.Name)
@@ -1895,7 +1896,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 	ginkgo.It("Deleted objects controlled by TidbCluster will be recovered by Operator", func() {
 		ginkgo.By("Deploy initial tc")
-		tc := fixture.GetTidbCluster(ns, "delete-objects", utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, "delete-objects", utilimage.TiDBV5)
 		utiltc.MustCreateTCWithComponentsReady(genericCli, oa, tc, 5*time.Minute, 10*time.Second)
 
 		ginkgo.By("Delete StatefulSet/ConfigMap/Service of PD")
@@ -1947,7 +1948,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		}
 		ginkgo.It(fmt.Sprintf("should work for %s", comp), func() {
 			ginkgo.By("Deploy initial tc")
-			tc := fixture.GetTidbCluster(ns, fmt.Sprintf("scale-out-scale-in-%s", comp), utilimage.TiDBV4)
+			tc := fixture.GetTidbCluster(ns, fmt.Sprintf("scale-out-scale-in-%s", comp), utilimage.TiDBV5)
 			switch comp {
 			case v1alpha1.PDMemberType:
 				tc.Spec.PD.Replicas = replicasLarge
@@ -2067,7 +2068,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		}
 		ginkgo.It(fmt.Sprintf("should work for %s", comp), func() {
 			ginkgo.By("Deploy initial tc")
-			tc := fixture.GetTidbCluster(ns, fmt.Sprintf("scale-in-upgrade-%s", comp), utilimage.TiDBV4Prev)
+			tc := fixture.GetTidbCluster(ns, fmt.Sprintf("scale-in-upgrade-%s", comp), utilimage.TiDBV5Prev)
 			switch comp {
 			case v1alpha1.PDMemberType:
 				tc.Spec.PD.Replicas = replicasLarge
@@ -2099,11 +2100,11 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 			err = controller.GuaranteedUpdate(genericCli, tc, func() error {
 				switch comp {
 				case v1alpha1.PDMemberType:
-					tc.Spec.PD.Version = pointer.StringPtr(utilimage.TiDBV4)
+					tc.Spec.PD.Version = pointer.StringPtr(utilimage.TiDBV5)
 				case v1alpha1.TiKVMemberType:
-					tc.Spec.TiKV.Version = pointer.StringPtr(utilimage.TiDBV4)
+					tc.Spec.TiKV.Version = pointer.StringPtr(utilimage.TiDBV5)
 				case v1alpha1.TiDBMemberType:
-					tc.Spec.TiDB.Version = pointer.StringPtr(utilimage.TiDBV4)
+					tc.Spec.TiDB.Version = pointer.StringPtr(utilimage.TiDBV5)
 				}
 				return nil
 			})
@@ -2134,13 +2135,13 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 				wrongImage := true
 				for _, c := range pod.Spec.Containers {
 					log.Logf("container image: %s", c.Image)
-					if fmt.Sprintf("pingcap/%s:%s", comp, utilimage.TiDBV4) == c.Image {
+					if fmt.Sprintf("pingcap/%s:%s", comp, utilimage.TiDBV5) == c.Image {
 						wrongImage = false
 						break
 					}
 				}
 				if wrongImage {
-					log.Failf("%s Pod has wrong image, expected %s", comp, utilimage.TiDBV4)
+					log.Failf("%s Pod has wrong image, expected %s", comp, utilimage.TiDBV5)
 				}
 			}
 		})
@@ -2149,7 +2150,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 	ginkgo.It("PD to 0 is forbidden while other components are running", func() {
 		ginkgo.By("Deploy initial tc")
-		tc := fixture.GetTidbCluster(ns, "scale-pd-to-0", utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, "scale-pd-to-0", utilimage.TiDBV5)
 		tc.Spec.PD.Replicas = 1
 		utiltc.MustCreateTCWithComponentsReady(genericCli, oa, tc, 5*time.Minute, 10*time.Second)
 
@@ -2188,7 +2189,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 	ginkgo.It("TiKV from >=3 replicas to <3 should be forbidden", func() {
 		ginkgo.By("Deploy initial tc")
-		tc := fixture.GetTidbCluster(ns, "scale-in-tikv", utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, "scale-in-tikv", utilimage.TiDBV5)
 		tc, err := cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Create(tc)
 		framework.ExpectNoError(err, "Expected create tidbcluster")
 		err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 5*time.Second)
@@ -2223,7 +2224,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 		ginkgo.By("Deploy tidbCluster")
 		userID := int64(1000)
 		groupID := int64(2000)
-		tc := fixture.GetTidbCluster(ns, "run-as-non-root", utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, "run-as-non-root", utilimage.TiDBV5)
 		tc = fixture.AddTiFlashForTidbCluster(tc)
 		tc = fixture.AddTiCDCForTidbCluster(tc)
 		tc = fixture.AddPumpForTidbCluster(tc)
@@ -2266,7 +2267,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() {
 
 	ginkgo.It("TidbCluster global topology spread contraint", func() {
 		ginkgo.By("Deploy tidbCluster")
-		tc := fixture.GetTidbCluster(ns, "topology-test", utilimage.TiDBV4)
+		tc := fixture.GetTidbCluster(ns, "topology-test", utilimage.TiDBV5)
 		tc.Spec.TopologySpreadConstraints = []v1alpha1.TopologySpreadConstraint{
 			{
 				TopologyKey: tests.LabelKeyTestingZone,
diff --git a/tests/e2e/util/image/image.go b/tests/e2e/util/image/image.go
index 5a927aaa405..b6b0a228955 100644
--- a/tests/e2e/util/image/image.go
+++ b/tests/e2e/util/image/image.go
@@ -28,8 +28,9 @@ import (
 
 const (
 	TiDBV3             = "v3.0.20"
-	TiDBV4Prev         = "v4.0.9"
-	TiDBV4             = "v4.0.10"
+	TiDBV4             = "v4.0.12"
+	TiDBV5Prev         = "v5.0.0"
+	TiDBV5             = "v5.0.1"
 	TiDBNightlyVersion = "nightly"
 	PrometheusImage    = "prom/prometheus"
 	PrometheusVersion  = "v2.18.1"
@@ -48,8 +49,9 @@ func ListImages() []string {
 	images := []string{}
 	versions := make([]string, 0)
 	versions = append(versions, TiDBV3)
-	versions = append(versions, TiDBV4Prev)
 	versions = append(versions, TiDBV4)
+	versions = append(versions, TiDBV5Prev)
+	versions = append(versions, TiDBV5)
 	versions = append(versions, TiDBNightlyVersion)
 	for _, v := range versions {
 		images = append(images, fmt.Sprintf("pingcap/pd:%s", v))