From 88d9fa312fdc6fcca4fed678c2c6690b8ef4d9f6 Mon Sep 17 00:00:00 2001 From: glorv Date: Wed, 21 Feb 2024 17:57:54 +0800 Subject: [PATCH 1/9] adjust the recommend value of raft election-timeout in multi dc deployment --- config-templates/geo-redundancy-deployment.yaml | 4 ++-- dr-multi-replica.md | 4 ++-- geo-distributed-deployment-topology.md | 4 ++-- three-data-centers-in-two-cities-deployment.md | 13 +++++++++---- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/config-templates/geo-redundancy-deployment.yaml b/config-templates/geo-redundancy-deployment.yaml index 74ad7ecddca3a..4f839c7752cc8 100644 --- a/config-templates/geo-redundancy-deployment.yaml +++ b/config-templates/geo-redundancy-deployment.yaml @@ -107,8 +107,8 @@ tikv_servers: host: host1 readpool.storage.use-unified-pool: true readpool.storage.low-concurrency: 10 - raftstore.raft-min-election-timeout-ticks: 1000 - raftstore.raft-max-election-timeout-ticks: 1020 + raftstore.raft-min-election-timeout-ticks: 50 + raftstore.raft-max-election-timeout-ticks: 60 monitoring_servers: - host: 10.0.1.16 grafana_servers: diff --git a/dr-multi-replica.md b/dr-multi-replica.md index fb8d2ed065cd5..fa85092ede9fe 100644 --- a/dr-multi-replica.md +++ b/dr-multi-replica.md @@ -74,8 +74,8 @@ In this example, TiDB contains five replicas and three regions. 
Region 1 is the config: server.labels: { Region: "Region3", AZ: "AZ5" } - raftstore.raft-min-election-timeout-ticks: 1000 - raftstore.raft-max-election-timeout-ticks: 1200 + raftstore.raft-min-election-timeout-ticks: 50 + raftstore.raft-max-election-timeout-ticks: 60 monitoring_servers: - host: tidb-dr-test2 diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index 230dff217355e..34af0369113ae 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -57,8 +57,8 @@ This section describes the key parameter configuration of the TiDB geo-distribut - To prevent remote TiKV nodes from launching unnecessary Raft elections, it is required to increase the minimum and maximum number of ticks that the remote TiKV nodes need to launch an election. The two parameters are set to `0` by default. ```yaml - raftstore.raft-min-election-timeout-ticks: 1000 - raftstore.raft-max-election-timeout-ticks: 1020 + raftstore.raft-min-election-timeout-ticks: 50 + raftstore.raft-max-election-timeout-ticks: 60 ``` #### PD parameters diff --git a/three-data-centers-in-two-cities-deployment.md b/three-data-centers-in-two-cities-deployment.md index 27352220f4174..24d2be8893697 100644 --- a/three-data-centers-in-two-cities-deployment.md +++ b/three-data-centers-in-two-cities-deployment.md @@ -114,8 +114,8 @@ tikv_servers: - host: 10.63.10.34 config: server.labels: { az: "3", replication zone: "5", rack: "5", host: "34" } - raftstore.raft-min-election-timeout-ticks: 1000 - raftstore.raft-max-election-timeout-ticks: 1200 + raftstore.raft-min-election-timeout-ticks: 50 + raftstore.raft-max-election-timeout-ticks: 60 monitoring_servers: - host: 10.63.10.60 @@ -175,10 +175,15 @@ In the deployment of three AZs in two regions, to optimize performance, you need - Optimize the network configuration of the TiKV node in another region (San Francisco). 
Modify the following TiKV parameters for AZ3 in San Francisco and try to prevent the replica in this TiKV node from participating in the Raft election. ```yaml - raftstore.raft-min-election-timeout-ticks: 1000 - raftstore.raft-max-election-timeout-ticks: 1200 + raftstore.raft-min-election-timeout-ticks: 50 + raftstore.raft-max-election-timeout-ticks: 60 ``` +> Note: +> +> Setting larger values for `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` can significantly decrease the likelihood of peers on the target TiKV instance becoming region leaders. However, in a disaster scenario where some TiKV instances are offline and the peers on other instances' Raft logs have fallen behind, it is possible that only the peer on the affected TiKV instance can become a region leader. This peer requires at least raftstore.raft-min-election-timeout-ticks seconds to start a campaign. Therefore, users should avoid setting excessively high configuration values to prevent impacting the cluster's availability in this extreme scenario. + + - Configure scheduling. After the cluster is enabled, use the `tiup ctl:v pd` tool to modify the scheduling policy. Modify the number of TiKV Raft replicas. Configure this number as planned. In this example, the number of replicas is five. 
```bash From ca524aa6555cfbdf8c0416f52363a271b5e2584b Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Feb 2024 10:52:01 +0800 Subject: [PATCH 2/9] Apply suggestions from code review --- geo-distributed-deployment-topology.md | 4 +++- three-data-centers-in-two-cities-deployment.md | 5 ++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index 34af0369113ae..596b736189e28 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -60,7 +60,9 @@ This section describes the key parameter configuration of the TiDB geo-distribut raftstore.raft-min-election-timeout-ticks: 50 raftstore.raft-max-election-timeout-ticks: 60 ``` - +> **Note:** +> +> Configuring larger values for `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` can significantly decrease the likelihood of a peer on the TiKV becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. #### PD parameters - The PD metadata information records the topology of the TiKV cluster. 
PD schedules the Raft Group replicas on the following four dimensions: diff --git a/three-data-centers-in-two-cities-deployment.md b/three-data-centers-in-two-cities-deployment.md index 24d2be8893697..b18008cc8f323 100644 --- a/three-data-centers-in-two-cities-deployment.md +++ b/three-data-centers-in-two-cities-deployment.md @@ -179,10 +179,9 @@ In the deployment of three AZs in two regions, to optimize performance, you need raftstore.raft-max-election-timeout-ticks: 60 ``` -> Note: +> **Note:** > -> Setting larger values for `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` can significantly decrease the likelihood of peers on the target TiKV instance becoming region leaders. However, in a disaster scenario where some TiKV instances are offline and the peers on other instances' Raft logs have fallen behind, it is possible that only the peer on the affected TiKV instance can become a region leader. This peer requires at least raftstore.raft-min-election-timeout-ticks seconds to start a campaign. Therefore, users should avoid setting excessively high configuration values to prevent impacting the cluster's availability in this extreme scenario. - +> Configuring larger values for `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` can significantly decrease the likelihood of a peer on the TiKV becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. - Configure scheduling. 
After the cluster is enabled, use the `tiup ctl:v pd` tool to modify the scheduling policy. Modify the number of TiKV Raft replicas. Configure this number as planned. In this example, the number of replicas is five. From 797a395cc0bd79438e9a63a9929f81017c77772a Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Feb 2024 15:19:55 +0800 Subject: [PATCH 3/9] Apply suggestions from code review --- geo-distributed-deployment-topology.md | 2 +- three-data-centers-in-two-cities-deployment.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index 596b736189e28..53886958bdcc2 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -62,7 +62,7 @@ This section describes the key parameter configuration of the TiDB geo-distribut ``` > **Note:** > -> Configuring larger values for `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` can significantly decrease the likelihood of a peer on the TiKV becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. +> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger tick values for a TiKV node can significantly decrease the likelihood of a peer on the TiKV node becoming the leader. 
However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV node with large tick values can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. #### PD parameters - The PD metadata information records the topology of the TiKV cluster. PD schedules the Raft Group replicas on the following four dimensions: diff --git a/three-data-centers-in-two-cities-deployment.md b/three-data-centers-in-two-cities-deployment.md index b18008cc8f323..5ad312ecd9f7c 100644 --- a/three-data-centers-in-two-cities-deployment.md +++ b/three-data-centers-in-two-cities-deployment.md @@ -181,7 +181,7 @@ In the deployment of three AZs in two regions, to optimize performance, you need > **Note:** > -> Configuring larger values for `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` can significantly decrease the likelihood of a peer on the TiKV becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. 
+> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger tick values for a TiKV node can significantly decrease the likelihood of a peer on the TiKV node becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV node with large tick values can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. - Configure scheduling. After the cluster is enabled, use the `tiup ctl:v pd` tool to modify the scheduling policy. Modify the number of TiKV Raft replicas. Configure this number as planned. In this example, the number of replicas is five. From 8a1fe444ac675faaf704712175f37612348e70d0 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Feb 2024 15:30:38 +0800 Subject: [PATCH 4/9] Apply suggestions from code review --- geo-distributed-deployment-topology.md | 2 +- three-data-centers-in-two-cities-deployment.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index 53886958bdcc2..f9f24966f18c4 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -62,7 +62,7 @@ This section describes the key parameter configuration of the TiDB geo-distribut ``` > **Note:** > -> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger tick values for a TiKV node can significantly decrease the likelihood of a peer on the TiKV node becoming the leader. 
However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV node with large tick values can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. +> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of a peer on the TiKV node becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV node with large election timeout ticks can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. #### PD parameters - The PD metadata information records the topology of the TiKV cluster. 
PD schedules the Raft Group replicas on the following four dimensions: diff --git a/three-data-centers-in-two-cities-deployment.md b/three-data-centers-in-two-cities-deployment.md index 5ad312ecd9f7c..b47884826426f 100644 --- a/three-data-centers-in-two-cities-deployment.md +++ b/three-data-centers-in-two-cities-deployment.md @@ -181,7 +181,7 @@ In the deployment of three AZs in two regions, to optimize performance, you need > **Note:** > -> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger tick values for a TiKV node can significantly decrease the likelihood of a peer on the TiKV node becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV node with large tick values can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. +> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of a peer on the TiKV node becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV node with large election timeout ticks can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. 
It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. - Configure scheduling. After the cluster is enabled, use the `tiup ctl:v pd` tool to modify the scheduling policy. Modify the number of TiKV Raft replicas. Configure this number as planned. In this example, the number of replicas is five. From 56819cba1d9dffa5598ecaffdd5c3fd5ddfc1c86 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Feb 2024 16:56:55 +0800 Subject: [PATCH 5/9] Apply suggestions from code review Co-authored-by: Grace Cai --- geo-distributed-deployment-topology.md | 2 +- three-data-centers-in-two-cities-deployment.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index f9f24966f18c4..1f0bf6d551a9f 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -62,7 +62,7 @@ This section describes the key parameter configuration of the TiDB geo-distribut ``` > **Note:** > -> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of a peer on the TiKV node becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV node with large election timeout ticks can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. 
+> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of Regions on that node becoming Leaders. However, in a disaster scenario where some TiKV nodes are offline and the remaining active TiKV nodes lag behind in Raft logs, only Regions on this TiKV node with large election timeout ticks can become Leaders. Because Regions on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks` before initiating an election, it is recommended to avoid setting these values excessively large to prevent potential impact on the cluster availability in such scenarios. #### PD parameters - The PD metadata information records the topology of the TiKV cluster. PD schedules the Raft Group replicas on the following four dimensions: diff --git a/three-data-centers-in-two-cities-deployment.md b/three-data-centers-in-two-cities-deployment.md index b47884826426f..32e1e41a24aa3 100644 --- a/three-data-centers-in-two-cities-deployment.md +++ b/three-data-centers-in-two-cities-deployment.md @@ -181,7 +181,7 @@ In the deployment of three AZs in two regions, to optimize performance, you need > **Note:** > -> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of a peer on the TiKV node becoming the leader. However, in a disaster scenario where some TiKV nodes are offline and the active TiKV node lags behind in Raft logs, only the Region on this specific TiKV node with large election timeout ticks can become the leader. In the event that the leader becomes unavailable, the Region on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks' to become a new leader. 
It is advisable not to set these values excessively large to prevent potential impact on the cluster availability in such scenarios. +> Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of Regions on that node becoming Leaders. However, in a disaster scenario where some TiKV nodes are offline and the remaining active TiKV nodes lag behind in Raft logs, only Regions on this TiKV node with large election timeout ticks can become Leaders. Because Regions on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks` before initiating an election, it is recommended to avoid setting these values excessively large to prevent potential impact on the cluster availability in such scenarios. - Configure scheduling. After the cluster is enabled, use the `tiup ctl:v pd` tool to modify the scheduling policy. Modify the number of TiKV Raft replicas. Configure this number as planned. In this example, the number of replicas is five. 
From 9033ffe53f9a0d075cde5e273a867146443227bc Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Feb 2024 17:12:24 +0800 Subject: [PATCH 6/9] Update geo-distributed-deployment-topology.md --- geo-distributed-deployment-topology.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index 1f0bf6d551a9f..69fbc8cb1e72c 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -60,7 +60,7 @@ This section describes the key parameter configuration of the TiDB geo-distribut raftstore.raft-min-election-timeout-ticks: 50 raftstore.raft-max-election-timeout-ticks: 60 ``` -> **Note:** +> **Note:** > > Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of Regions on that node becoming Leaders. However, in a disaster scenario where some TiKV nodes are offline and the remaining active TiKV nodes lag behind in Raft logs, only Regions on this TiKV node with large election timeout ticks can become Leaders. Because Regions on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks` before initiating an election, it is recommended to avoid setting these values excessively large to prevent potential impact on the cluster availability in such scenarios. 
#### PD parameters From 73a90dda95962986446245deaabcde8375dfcc9b Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Feb 2024 17:17:53 +0800 Subject: [PATCH 7/9] Update geo-distributed-deployment-topology.md --- geo-distributed-deployment-topology.md | 1 + 1 file changed, 1 insertion(+) diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index 69fbc8cb1e72c..0af4ce21d900f 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -63,6 +63,7 @@ This section describes the key parameter configuration of the TiDB geo-distribut > **Note:** > > Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of Regions on that node becoming Leaders. However, in a disaster scenario where some TiKV nodes are offline and the remaining active TiKV nodes lag behind in Raft logs, only Regions on this TiKV node with large election timeout ticks can become Leaders. Because Regions on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks` before initiating an election, it is recommended to avoid setting these values excessively large to prevent potential impact on the cluster availability in such scenarios. + #### PD parameters - The PD metadata information records the topology of the TiKV cluster. 
PD schedules the Raft Group replicas on the following four dimensions: From 49bfce4605b7f8d7dd87c4461b8c867be513e5e2 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Feb 2024 17:23:15 +0800 Subject: [PATCH 8/9] Update geo-distributed-deployment-topology.md --- geo-distributed-deployment-topology.md | 1 + 1 file changed, 1 insertion(+) diff --git a/geo-distributed-deployment-topology.md b/geo-distributed-deployment-topology.md index 0af4ce21d900f..6846a757f8c44 100644 --- a/geo-distributed-deployment-topology.md +++ b/geo-distributed-deployment-topology.md @@ -60,6 +60,7 @@ This section describes the key parameter configuration of the TiDB geo-distribut raftstore.raft-min-election-timeout-ticks: 50 raftstore.raft-max-election-timeout-ticks: 60 ``` + > **Note:** > > Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of Regions on that node becoming Leaders. However, in a disaster scenario where some TiKV nodes are offline and the remaining active TiKV nodes lag behind in Raft logs, only Regions on this TiKV node with large election timeout ticks can become Leaders. Because Regions on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks` before initiating an election, it is recommended to avoid setting these values excessively large to prevent potential impact on the cluster availability in such scenarios. 
From 1af384e95757582e18b1bf65c5c014372af2ad37 Mon Sep 17 00:00:00 2001 From: xixirangrang Date: Thu, 22 Feb 2024 17:34:45 +0800 Subject: [PATCH 9/9] Update three-data-centers-in-two-cities-deployment.md --- three-data-centers-in-two-cities-deployment.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/three-data-centers-in-two-cities-deployment.md b/three-data-centers-in-two-cities-deployment.md index 32e1e41a24aa3..d8b5e661fb2ad 100644 --- a/three-data-centers-in-two-cities-deployment.md +++ b/three-data-centers-in-two-cities-deployment.md @@ -183,7 +183,7 @@ In the deployment of three AZs in two regions, to optimize performance, you need > > Using `raftstore.raft-min-election-timeout-ticks` and `raftstore.raft-max-election-timeout-ticks` to configure larger election timeout ticks for a TiKV node can significantly decrease the likelihood of Regions on that node becoming Leaders. However, in a disaster scenario where some TiKV nodes are offline and the remaining active TiKV nodes lag behind in Raft logs, only Regions on this TiKV node with large election timeout ticks can become Leaders. Because Regions on this TiKV node must wait for at least the duration set by `raftstore.raft-min-election-timeout-ticks` before initiating an election, it is recommended to avoid setting these values excessively large to prevent potential impact on the cluster availability in such scenarios. -- Configure scheduling. After the cluster is enabled, use the `tiup ctl:v pd` tool to modify the scheduling policy. Modify the number of TiKV Raft replicas. Configure this number as planned. In this example, the number of replicas is five. +- Configure scheduling. After the cluster is enabled, use the `tiup ctl:v{CLUSTER_VERSION} pd` tool to modify the scheduling policy. Modify the number of TiKV Raft replicas. Configure this number as planned. In this example, the number of replicas is five. ```bash config set max-replicas 5