Update the main TiDB version to v5.0 in e2e (#3934)
handlerww authored May 13, 2021
1 parent d90f424 commit e405d4d
Showing 10 changed files with 112 additions and 109 deletions.
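The hunks below swap the shared e2e image constants from utilimage.TiDBV4/TiDBV4Prev to utilimage.TiDBV5/TiDBV5Prev; the constant definitions themselves live in the e2e image utility package, presumably among the changed files not rendered in this excerpt. A minimal sketch of what the updated constants might look like, assuming the package sits under tests/e2e/util/image and using illustrative version strings (the exact values are not part of this commit excerpt):

// Assumed location: tests/e2e/util/image, imported as utilimage in the tests below.
// The concrete version strings here are illustrative, not taken from this commit.
package image

const (
	// Main TiDB version targeted by the e2e suite after this change.
	TiDBV5 = "v5.0.1"
	// Previous release used as the starting point in upgrade tests.
	TiDBV5Prev = "v4.0.12"
)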
2 changes: 1 addition & 1 deletion ci/pull_e2e_kind.groovy
@@ -181,7 +181,7 @@ def build(String name, String code, Map resources = e2ePodResources) {
unstash 'tidb-operator'
stage("Debug Info") {
println "debug host: 172.16.5.15"
println "debug command: kubectl -n jenkins-ci exec -ti ${NODE_NAME} bash"
println "debug command: kubectl -n jenkins-tidb exec -ti ${NODE_NAME} bash"
sh """
echo "====== shell env ======"
echo "pwd: \$(pwd)"
2 changes: 1 addition & 1 deletion ci/pull_e2e_release.groovy
@@ -177,7 +177,7 @@ def build(String name, String code, Map resources = e2ePodResources) {
unstash 'tidb-operator'
stage("Debug Info") {
println "debug host: 172.16.5.15"
println "debug command: kubectl -n jenkins-ci exec -ti ${NODE_NAME} bash"
println "debug command: kubectl -n jenkins-tidb exec -ti ${NODE_NAME} bash"
sh """
echo "====== shell env ======"
echo "pwd: \$(pwd)"
2 changes: 1 addition & 1 deletion pkg/manager/member/utils.go
@@ -520,7 +520,7 @@ func addDeferDeletingAnnoToPVC(tc *v1alpha1.TidbCluster, pvc *corev1.PersistentV
klog.Errorf("failed to set PVC %s/%s annotation %q to %q", tc.Namespace, pvc.Name, label.AnnPVCDeferDeleting, now)
return err
}
klog.Infof("set PVC %s/%s annotationq %q to %q successfully", tc.Namespace, pvc.Name, label.AnnPVCDeferDeleting, now)
klog.Infof("set PVC %s/%s annotation %q to %q successfully", tc.Namespace, pvc.Name, label.AnnPVCDeferDeleting, now)
return nil
}

2 changes: 1 addition & 1 deletion tests/actions.go
@@ -649,7 +649,7 @@ func (oa *OperatorActions) DeployDMTiDBOrDie() {
slack.NotifyAndPanic(err)
}

tc := fixture.GetTidbCluster(DMTiDBNamespace, DMTiDBName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(DMTiDBNamespace, DMTiDBName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 1
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 1
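Every suite in this diff builds its cluster through fixture.GetTidbCluster(ns, name, version), so bumping the version argument (and tc.Spec.Version in the upgrade paths) is enough to move every component image to v5.0. As a rough illustration of that pattern, not the repository's actual fixture code, a helper of this shape could look like the following (package name and import paths assumed):

// Hypothetical sketch: one version string fans out to every component,
// because each image resolves to <BaseImage>:<Spec.Version>.
package fixture

import (
	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func getTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
	tc := &v1alpha1.TidbCluster{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: name},
		Spec: v1alpha1.TidbClusterSpec{
			Version: version, // e.g. utilimage.TiDBV5
			PD:      &v1alpha1.PDSpec{Replicas: 3},
			TiKV:    &v1alpha1.TiKVSpec{Replicas: 3},
			TiDB:    &v1alpha1.TiDBSpec{Replicas: 2},
		},
	}
	// Component images are composed from the base image plus Spec.Version,
	// e.g. pingcap/tidb:<version> once Version is set to utilimage.TiDBV5.
	tc.Spec.PD.BaseImage = "pingcap/pd"
	tc.Spec.TiKV.BaseImage = "pingcap/tikv"
	tc.Spec.TiDB.BaseImage = "pingcap/tidb"
	return tc
}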
64 changes: 32 additions & 32 deletions tests/e2e/tidbcluster/serial.go
@@ -147,11 +147,11 @@ var _ = ginkgo.Describe("[Serial]", func() {
oa.CleanCRDOrDie()
})

ginkgo.It(fmt.Sprintf("should be able to upgrade TiDB Cluster from %s to %s", utilimage.TiDBV4Prev, utilimage.TiDBV4), func() {
ginkgo.It(fmt.Sprintf("should be able to upgrade TiDB Cluster from %s to %s", utilimage.TiDBV5Prev, utilimage.TiDBV5), func() {
log.Logf("start to upgrade tidbcluster with pod admission webhook")
// deploy new cluster and test upgrade and scale-in/out with pod admission webhook
ginkgo.By(fmt.Sprintf("start initial TidbCluster %q", utilimage.TiDBV4Prev))
tc := fixture.GetTidbCluster(ns, "admission", utilimage.TiDBV4Prev)
ginkgo.By(fmt.Sprintf("start initial TidbCluster %q", utilimage.TiDBV5Prev))
tc := fixture.GetTidbCluster(ns, "admission", utilimage.TiDBV5Prev)
tc.Spec.PD.Replicas = 3
tc.Spec.TiKV.Replicas = 3
tc.Spec.TiDB.Replicas = 2
@@ -164,25 +164,25 @@ var _ = ginkgo.Describe("[Serial]", func() {
err = setPartitionAnnotation(ns, tc.Name, label.TiKVLabelVal, 1)
framework.ExpectNoError(err, "set tikv Partition annotation failed")

ginkgo.By(fmt.Sprintf("Upgrade TidbCluster version to %q", utilimage.TiDBV4))
ginkgo.By(fmt.Sprintf("Upgrade TidbCluster version to %q", utilimage.TiDBV5))
err = controller.GuaranteedUpdate(genericCli, tc, func() error {
tc.Spec.Version = utilimage.TiDBV4
tc.Spec.Version = utilimage.TiDBV5
return nil
})
framework.ExpectNoError(err, "failed to update TidbCluster to upgrade tidb version to %v", utilimage.TiDBV4)
framework.ExpectNoError(err, "failed to update TidbCluster to upgrade tidb version to %v", utilimage.TiDBV5)

ginkgo.By(fmt.Sprintf("wait for tikv-1 pod upgrading to %q", utilimage.TiDBV4))
ginkgo.By(fmt.Sprintf("wait for tikv-1 pod upgrading to %q", utilimage.TiDBV5))
err = wait.Poll(5*time.Second, 10*time.Minute, func() (done bool, err error) {
tikvPod, err := c.CoreV1().Pods(ns).Get(fmt.Sprintf("%s-tikv-1", tc.Name), metav1.GetOptions{})
if err != nil {
return false, nil
}
if tikvPod.Spec.Containers[0].Image != fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV4) {
if tikvPod.Spec.Containers[0].Image != fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV5) {
return false, nil
}
return true, nil
})
framework.ExpectNoError(err, "failed to upgrade tikv-1 to %q", utilimage.TiDBV4)
framework.ExpectNoError(err, "failed to upgrade tikv-1 to %q", utilimage.TiDBV5)

ginkgo.By("Wait to see if tikv sts partition annotation remains 1 for 3 min")
// TODO: explain the purpose of this testing
@@ -260,13 +260,13 @@ var _ = ginkgo.Describe("[Serial]", func() {
TiDB: &v1alpha1.TiDBSpec{
Replicas: 1,
ComponentSpec: v1alpha1.ComponentSpec{
Image: fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV4Prev),
Image: fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV5Prev),
},
},
TiKV: &v1alpha1.TiKVSpec{
Replicas: 1,
ComponentSpec: v1alpha1.ComponentSpec{
Image: fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV4Prev),
Image: fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV5Prev),
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -277,7 +277,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
PD: &v1alpha1.PDSpec{
Replicas: 1,
ComponentSpec: v1alpha1.ComponentSpec{
Image: fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV4Prev),
Image: fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV5Prev),
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -297,7 +297,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
oa.UpgradeOperatorOrDie(ocfg)
// now the webhook enabled
err = controller.GuaranteedUpdate(genericCli, legacyTc, func() error {
legacyTc.Spec.TiDB.Image = fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV4)
legacyTc.Spec.TiDB.Image = fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV5)
return nil
})
framework.ExpectNoError(err, "Update legacy TidbCluster should not be influenced by validating")
@@ -308,7 +308,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
legacyTc.Spec.TiDB.BaseImage = "pingcap/tidb"
legacyTc.Spec.TiKV.BaseImage = "pingcap/tikv"
legacyTc.Spec.PD.BaseImage = "pingcap/pd"
legacyTc.Spec.PD.Version = pointer.StringPtr(utilimage.TiDBV4)
legacyTc.Spec.PD.Version = pointer.StringPtr(utilimage.TiDBV5)
return nil
})
framework.ExpectNoError(err, "failed to update TidbCluster")
@@ -331,12 +331,12 @@ var _ = ginkgo.Describe("[Serial]", func() {
Spec: v1alpha1.TidbClusterSpec{
TiDB: &v1alpha1.TiDBSpec{
ComponentSpec: v1alpha1.ComponentSpec{
Image: fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV4),
Image: fmt.Sprintf("pingcap/tidb:%s", utilimage.TiDBV5),
},
},
TiKV: &v1alpha1.TiKVSpec{
ComponentSpec: v1alpha1.ComponentSpec{
Image: fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV4),
Image: fmt.Sprintf("pingcap/tikv:%s", utilimage.TiDBV5),
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -346,7 +346,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
},
PD: &v1alpha1.PDSpec{
ComponentSpec: v1alpha1.ComponentSpec{
Image: fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV4),
Image: fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV5),
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -366,7 +366,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
Name: "newly-created",
},
Spec: v1alpha1.TidbClusterSpec{
Version: utilimage.TiDBV4,
Version: utilimage.TiDBV5,
TiDB: &v1alpha1.TiDBSpec{
Replicas: 1,
},
@@ -446,9 +446,9 @@ var _ = ginkgo.Describe("[Serial]", func() {
})

ginkgo.It("should not change old TidbCluster", func() {
ginkgo.By(fmt.Sprintf("deploy original tc %q", utilimage.TiDBV4))
ginkgo.By(fmt.Sprintf("deploy original tc %q", utilimage.TiDBV5))
tcName := "tidbcluster"
tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 3
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 1
@@ -517,7 +517,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
ginkgo.It("should migrate tidbmonitor from deployment to sts", func() {
ginkgo.By("deploy initial tc")
tcName := "smooth-tidbcluster"
tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 1
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 1
@@ -607,9 +607,9 @@ var _ = ginkgo.Describe("[Serial]", func() {
})

ginkgo.It("Deploy TidbCluster and check the result", func() {
ginkgo.By(fmt.Sprintf("deploy original tc %q", utilimage.TiDBV4Prev))
ginkgo.By(fmt.Sprintf("deploy original tc %q", utilimage.TiDBV5Prev))
tcName := "tidbcluster1"
tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4Prev)
tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5Prev)
tc.Spec.PD.Replicas = 1
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 1
@@ -629,10 +629,10 @@ var _ = ginkgo.Describe("[Serial]", func() {

ginkgo.By("Upgrade TidbCluster 1 version, wait for 2 minutes, check that no rolling update occurs")
err = controller.GuaranteedUpdate(genericCli, tc, func() error {
tc.Spec.Version = utilimage.TiDBV4
tc.Spec.Version = utilimage.TiDBV5
return nil
})
framework.ExpectNoError(err, "failed to update TidbCluster 1 to upgrade PD version to %v", utilimage.TiDBV4)
framework.ExpectNoError(err, "failed to update TidbCluster 1 to upgrade PD version to %v", utilimage.TiDBV5)

err = wait.Poll(5*time.Second, 2*time.Minute, func() (done bool, err error) {
// confirm the TidbCluster 1 PD haven't been changed
@@ -675,7 +675,7 @@ var _ = ginkgo.Describe("[Serial]", func() {

ginkgo.By("Deploy TidbCluster 2 with label version=new")
tc2Name := "tidbcluster2"
tc2 := fixture.GetTidbCluster(ns, tc2Name, utilimage.TiDBV4Prev)
tc2 := fixture.GetTidbCluster(ns, tc2Name, utilimage.TiDBV5Prev)
tc2.Spec.PD.Replicas = 1
tc2.Spec.TiKV.Replicas = 1
tc2.Spec.TiDB.Replicas = 1
@@ -726,27 +726,27 @@ var _ = ginkgo.Describe("[Serial]", func() {

ginkgo.By("Upgrade TiDB version of TidbCluster 2")
err = controller.GuaranteedUpdate(genericCli, tc2, func() error {
tc2.Spec.Version = utilimage.TiDBV4
tc2.Spec.Version = utilimage.TiDBV5
return nil
})
framework.ExpectNoError(err, "failed to update TidbCluster 2 to upgrade tidb version to %v", utilimage.TiDBV4)
framework.ExpectNoError(err, "failed to update TidbCluster 2 to upgrade tidb version to %v", utilimage.TiDBV5)
log.Logf("Finished upgrading TidbCluster 2")

err = oa.WaitForTidbClusterReady(tc2, 10*time.Minute, 10*time.Second)
framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", ns, tc2.Name)

ginkgo.By(fmt.Sprintf("wait for TidbCluster 2 pd-0 pod upgrading to %q", utilimage.TiDBV4))
ginkgo.By(fmt.Sprintf("wait for TidbCluster 2 pd-0 pod upgrading to %q", utilimage.TiDBV5))
err = wait.Poll(5*time.Second, 10*time.Minute, func() (done bool, err error) {
pdPod, err := c.CoreV1().Pods(ns).Get(fmt.Sprintf("%s-pd-0", tc2.Name), metav1.GetOptions{})
if err != nil {
return false, nil
}
if pdPod.Spec.Containers[0].Image != fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV4) {
if pdPod.Spec.Containers[0].Image != fmt.Sprintf("pingcap/pd:%s", utilimage.TiDBV5) {
return false, nil
}
return true, nil
})
framework.ExpectNoError(err, "failed to upgrade TidbCluster 2 pd-0 to %q", utilimage.TiDBV4)
framework.ExpectNoError(err, "failed to upgrade TidbCluster 2 pd-0 to %q", utilimage.TiDBV5)
log.Logf("Finished upgrading TidbCluster 2")

ginkgo.By("Deploy the default TiDB Operator with --selector=version=old")
@@ -818,7 +818,7 @@ var _ = ginkgo.Describe("[Serial]", func() {
framework.Skipf("duplicated test")
tcName := "basic"

tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 1
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 1
8 changes: 4 additions & 4 deletions tests/e2e/tidbcluster/stability-asts.go
@@ -141,7 +141,7 @@ var _ = ginkgo.Describe("[Stability]", func() {

ginkgo.It("Scaling tidb cluster with advanced statefulset", func() {
clusterName := "scaling-with-asts"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
tc = fixture.AddTiFlashForTidbCluster(tc)
tc.Spec.PD.Replicas = 3
tc.Spec.TiKV.Replicas = 5
@@ -346,7 +346,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
oa.CleanCRDOrDie()
}()

tc := fixture.GetTidbCluster(ns, "sts", utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, "sts", utilimage.TiDBV5)
err = genericCli.Create(context.TODO(), tc)
framework.ExpectNoError(err, "failed to create TidbCluster: %v", tc)
err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
@@ -438,7 +438,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
oa.CleanCRDOrDie()
}()

tc := fixture.GetTidbCluster(ns, "upgrade-cluster", utilimage.TiDBV4Prev)
tc := fixture.GetTidbCluster(ns, "upgrade-cluster", utilimage.TiDBV5Prev)
tc.Spec.PD.Replicas = 5
tc.Spec.TiKV.Replicas = 4
tc.Spec.TiDB.Replicas = 3
@@ -469,7 +469,7 @@ var _ = ginkgo.Describe("[Stability]", func() {

ginkgo.By("Upgrading the cluster")
err = controller.GuaranteedUpdate(genericCli, tc, func() error {
tc.Spec.Version = utilimage.TiDBV4
tc.Spec.Version = utilimage.TiDBV5
return nil
})
framework.ExpectNoError(err, "failed to upgrade TidbCluster %s/%s", ns, tc.Name)
8 changes: 4 additions & 4 deletions tests/e2e/tidbcluster/stability-br.go
@@ -196,7 +196,7 @@ func testBR(provider, ns string, fw portforward.PortForward, c clientset.Interfa
}

// create backup cluster
tcFrom := fixture.GetTidbCluster(ns, tcNameFrom, utilimage.TiDBV4)
tcFrom := fixture.GetTidbCluster(ns, tcNameFrom, utilimage.TiDBV5)
tcFrom.Spec.PD.Replicas = 1
tcFrom.Spec.TiKV.Replicas = 1
tcFrom.Spec.TiDB.Replicas = 1
@@ -207,7 +207,7 @@ func testBR(provider, ns string, fw portforward.PortForward, c clientset.Interfa
framework.ExpectNoError(err, "failed to create TidbCluster tcFrom: %v", tcFrom)

// create restore cluster
tcTo := fixture.GetTidbCluster(ns, tcNameTo, utilimage.TiDBV4)
tcTo := fixture.GetTidbCluster(ns, tcNameTo, utilimage.TiDBV5)
tcTo.Spec.PD.Replicas = 1
tcTo.Spec.TiKV.Replicas = 1
tcTo.Spec.TiDB.Replicas = 1
@@ -220,11 +220,11 @@ func testBR(provider, ns string, fw portforward.PortForward, c clientset.Interfa
// wait both tidbcluster ready
err = oa.WaitForTidbClusterReady(tcFrom, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err, "failed to wait for TidbCluster tcFrom ready")
clusterFrom := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameFrom, "", utilimage.TiDBV4)
clusterFrom := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameFrom, "", utilimage.TiDBV5)

err = oa.WaitForTidbClusterReady(tcTo, 30*time.Minute, 15*time.Second)
framework.ExpectNoError(err, "failed to wait for TidbCluster tcTo ready")
clusterTo := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameTo, "", utilimage.TiDBV4)
clusterTo := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameTo, "", utilimage.TiDBV5)

// import some data to sql with blockwriter
ginkgo.By(fmt.Sprintf("Begin inserting data into cluster %q", clusterFrom.ClusterName))
16 changes: 8 additions & 8 deletions tests/e2e/tidbcluster/stability.go
@@ -161,7 +161,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
for _, test := range testCases {
ginkgo.It("tidb cluster should not be affected while "+test.name, func() {
clusterName := "test"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
utiltc.MustCreateTCWithComponentsReady(genericCli, oa, tc, 30*time.Minute, 15*time.Second)

test.fn()
@@ -287,7 +287,7 @@ var _ = ginkgo.Describe("[Stability]", func() {

ginkgo.By("Deploy a test cluster with 3 pd and tikv replicas")
clusterName := "test"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 3
tc.Spec.PD.MaxFailoverCount = pointer.Int32Ptr(0)
tc.Spec.TiDB.Replicas = 1
@@ -501,7 +501,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
// See docs/design-proposals/tidb-stable-scheduling.md
ginkgo.It("[Feature: StableScheduling] TiDB pods should be scheduled to preivous nodes", func() {
clusterName := "tidb-scheduling"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 1
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 3
@@ -607,7 +607,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
defer utilcloud.EnableNodeAutoRepair()
utilcloud.DisableNodeAutoRepair()
clusterName := "failover"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 3
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 1
@@ -695,7 +695,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">=", 3))

clusterName := "failover"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 1
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 2
@@ -862,7 +862,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
// TODO: this should be a regression type
ginkgo.It("[Feature: AutoFailover] Failover can work if a store fails to update", func() {
clusterName := "scale"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 1
// By default, PD set the state of disconnected store to Down
// after 30 minutes. Use a short time in testing.
@@ -922,7 +922,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
// TODO: this should be a regression type
ginkgo.It("[Feature: AutoFailover] Failover can work if a pd fails to update", func() {
clusterName := "scale"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
tc.Spec.PD.Replicas = 3
tc.Spec.TiKV.Replicas = 1
tc.Spec.TiDB.Replicas = 1
@@ -1019,7 +1019,7 @@ var _ = ginkgo.Describe("[Stability]", func() {
gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">=", 3))

clusterName := "failover"
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV4)
tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV5)
tc.Spec.SchedulerName = ""
tc.Spec.PD.Replicas = 1
tc.Spec.PD.Config.Set("schedule.max-store-down-time", "1m")
(2 more changed files not shown)
