// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::{
collections::hash_map::Entry as MapEntry,
error::Error as StdError,
result,
sync::{mpsc, Arc, Mutex, RwLock},
thread,
time::Duration,
};
use collections::{HashMap, HashSet};
use crossbeam::channel::TrySendError;
use encryption_export::DataKeyManager;
use engine_rocks::{RocksEngine, RocksSnapshot, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{
CompactExt, Engines, Iterable, ManualCompactionOptions, MiscExt, Mutable, Peekable,
RaftEngineReadOnly, SyncMutable, WriteBatch, WriteBatchExt, CF_DEFAULT, CF_RAFT,
};
use file_system::IoRateLimiter;
use futures::{self, channel::oneshot, executor::block_on, future::BoxFuture, StreamExt};
use kvproto::{
errorpb::Error as PbError,
kvrpcpb::{ApiVersion, Context, DiskFullOpt},
metapb::{self, Buckets, PeerRole, RegionEpoch, StoreLabel},
pdpb::{self, CheckPolicy, StoreReport},
raft_cmdpb::*,
raft_serverpb::{
PeerState, RaftApplyState, RaftLocalState, RaftMessage, RaftTruncatedState,
RegionLocalState,
},
};
use pd_client::{BucketStat, PdClient};
use raft::eraftpb::ConfChangeType;
use raftstore::{
router::RaftStoreRouter,
store::{
fsm::{
create_raft_batch_system,
store::{StoreMeta, PENDING_MSG_CAP},
ApplyRouter, RaftBatchSystem, RaftRouter,
},
transport::CasualRouter,
*,
},
Error, Result,
};
use resource_control::ResourceGroupManager;
use tempfile::TempDir;
use test_pd_client::TestPdClient;
use tikv::{config::TikvConfig, server::Result as ServerResult};
use tikv_util::{
thread_group::GroupProperties,
time::{Instant, ThreadReadId},
worker::LazyWorker,
HandyRwLock,
};
use txn_types::WriteBatchFlags;
use super::*;
use crate::Config;
// We simulate 3 or 5 nodes, each of which has a store.
// Sometimes we use fixed ids in tests, which means the ids
// aren't allocated by PD, and the node id and store id are the same.
// E.g., for node 1, the node id and store id are both 1.
pub trait Simulator {
// Pass 0 to let PD allocate a node id if the db is empty.
// If node id > 0, the node must already exist in the db,
// and the node id must be the same as the given argument.
// Returns the node id.
// TODO: rename `node` here because now only `store` is used.
fn run_node(
&mut self,
node_id: u64,
cfg: Config,
engines: Engines<RocksEngine, RaftTestEngine>,
store_meta: Arc<Mutex<StoreMeta>>,
key_manager: Option<Arc<DataKeyManager>>,
router: RaftRouter<RocksEngine, RaftTestEngine>,
system: RaftBatchSystem<RocksEngine, RaftTestEngine>,
resource_manager: &Option<Arc<ResourceGroupManager>>,
) -> ServerResult<u64>;
fn stop_node(&mut self, node_id: u64);
fn get_node_ids(&self) -> HashSet<u64>;
fn async_command_on_node(
&self,
node_id: u64,
request: RaftCmdRequest,
cb: Callback<RocksSnapshot>,
) -> Result<()> {
self.async_command_on_node_with_opts(node_id, request, cb, Default::default())
}
fn async_command_on_node_with_opts(
&self,
node_id: u64,
request: RaftCmdRequest,
cb: Callback<RocksSnapshot>,
opts: RaftCmdExtraOpts,
) -> Result<()>;
fn send_raft_msg(&mut self, msg: RaftMessage) -> Result<()>;
fn get_snap_dir(&self, node_id: u64) -> String;
fn get_snap_mgr(&self, node_id: u64) -> &SnapManager;
fn get_router(&self, node_id: u64) -> Option<RaftRouter<RocksEngine, RaftTestEngine>>;
fn get_apply_router(&self, node_id: u64) -> Option<ApplyRouter<RocksEngine>>;
fn add_send_filter(&mut self, node_id: u64, filter: Box<dyn Filter>);
fn clear_send_filters(&mut self, node_id: u64);
fn add_recv_filter(&mut self, node_id: u64, filter: Box<dyn Filter>);
fn clear_recv_filters(&mut self, node_id: u64);
fn call_command(&self, request: RaftCmdRequest, timeout: Duration) -> Result<RaftCmdResponse> {
let node_id = request.get_header().get_peer().get_store_id();
self.call_command_on_node(node_id, request, timeout)
}
fn read(
&mut self,
batch_id: Option<ThreadReadId>,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let node_id = request.get_header().get_peer().get_store_id();
let (cb, mut rx) = make_cb(&request);
self.async_read(node_id, batch_id, request, cb);
rx.recv_timeout(timeout)
.map_err(|_| Error::Timeout(format!("request timeout for {:?}", timeout)))
}
fn async_read(
&mut self,
node_id: u64,
batch_id: Option<ThreadReadId>,
request: RaftCmdRequest,
cb: Callback<RocksSnapshot>,
);
fn call_command_on_node(
&self,
node_id: u64,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let (cb, mut rx) = make_cb(&request);
match self.async_command_on_node(node_id, request, cb) {
Ok(()) => {}
Err(e) => {
let mut resp = RaftCmdResponse::default();
resp.mut_header().set_error(e.into());
return Ok(resp);
}
}
rx.recv_timeout(timeout)
.map_err(|e| Error::Timeout(format!("request timeout for {:?}: {:?}", timeout, e)))
}
}
pub struct Cluster<T: Simulator> {
pub cfg: Config,
leaders: HashMap<u64, metapb::Peer>,
pub count: usize,
pub paths: Vec<TempDir>,
pub dbs: Vec<Engines<RocksEngine, RaftTestEngine>>,
pub store_metas: HashMap<u64, Arc<Mutex<StoreMeta>>>,
key_managers: Vec<Option<Arc<DataKeyManager>>>,
pub io_rate_limiter: Option<Arc<IoRateLimiter>>,
pub engines: HashMap<u64, Engines<RocksEngine, RaftTestEngine>>,
key_managers_map: HashMap<u64, Option<Arc<DataKeyManager>>>,
pub labels: HashMap<u64, HashMap<String, String>>,
group_props: HashMap<u64, GroupProperties>,
pub sst_workers: Vec<LazyWorker<String>>,
pub sst_workers_map: HashMap<u64, usize>,
pub kv_statistics: Vec<Arc<RocksStatistics>>,
pub raft_statistics: Vec<Option<Arc<RocksStatistics>>>,
pub sim: Arc<RwLock<T>>,
pub pd_client: Arc<TestPdClient>,
resource_manager: Option<Arc<ResourceGroupManager>>,
}
impl<T: Simulator> Cluster<T> {
// Create the default Store cluster.
pub fn new(
id: u64,
count: usize,
sim: Arc<RwLock<T>>,
pd_client: Arc<TestPdClient>,
api_version: ApiVersion,
) -> Cluster<T> {
// TODO: In the future, maybe it's better to test both cases where
// `use_delete_range` is true and where it is false.
Cluster {
cfg: Config::new(new_tikv_config_with_api_ver(id, api_version), true),
leaders: HashMap::default(),
count,
paths: vec![],
dbs: vec![],
store_metas: HashMap::default(),
key_managers: vec![],
io_rate_limiter: None,
engines: HashMap::default(),
key_managers_map: HashMap::default(),
labels: HashMap::default(),
group_props: HashMap::default(),
sim,
pd_client,
sst_workers: vec![],
sst_workers_map: HashMap::default(),
resource_manager: Some(Arc::new(ResourceGroupManager::default())),
kv_statistics: vec![],
raft_statistics: vec![],
}
}
pub fn set_cfg(&mut self, mut cfg: TikvConfig) {
cfg.cfg_path = self.cfg.tikv.cfg_path.clone();
self.cfg.tikv = cfg;
}
// To destroy temp dir later.
pub fn take_path(&mut self) -> Vec<TempDir> {
std::mem::take(&mut self.paths)
}
pub fn id(&self) -> u64 {
self.cfg.server.cluster_id
}
pub fn pre_start_check(&mut self) -> result::Result<(), Box<dyn StdError>> {
for path in &self.paths {
self.cfg.storage.data_dir = path.path().to_str().unwrap().to_owned();
self.cfg.validate()?
}
Ok(())
}
/// Engines in a freshly created cluster are not bootstrapped, which means they
/// are not associated with a `node_id` yet. Calling `Cluster::start` bootstraps
/// all nodes in the cluster.
///
/// However, sometimes a node can be bootstrapped externally. This function
/// can be called to mark such nodes as bootstrapped in `Cluster`.
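///
/// A hedged usage sketch (the external bootstrap step itself is elided; node id
/// `1` and engine offset `0` are hypothetical values):
///
/// ```ignore
/// cluster.create_engines();
/// // ... bootstrap the engines at offset 0 out of band ...
/// cluster.set_bootstrapped(1, 0);
/// cluster.start().unwrap();
/// ```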
pub fn set_bootstrapped(&mut self, node_id: u64, offset: usize) {
let engines = self.dbs[offset].clone();
let key_mgr = self.key_managers[offset].clone();
assert!(self.engines.insert(node_id, engines).is_none());
assert!(self.key_managers_map.insert(node_id, key_mgr).is_none());
assert!(self.sst_workers_map.insert(node_id, offset).is_none());
}
fn create_engine(&mut self, router: Option<RaftRouter<RocksEngine, RaftTestEngine>>) {
let (engines, key_manager, dir, sst_worker, kv_statistics, raft_statistics) =
create_test_engine(router, self.io_rate_limiter.clone(), &self.cfg);
self.dbs.push(engines);
self.key_managers.push(key_manager);
self.paths.push(dir);
self.sst_workers.push(sst_worker);
self.kv_statistics.push(kv_statistics);
self.raft_statistics.push(raft_statistics);
}
pub fn create_engines(&mut self) {
self.io_rate_limiter = Some(Arc::new(
self.cfg
.storage
.io_rate_limit
.build(true /* enable_statistics */),
));
for _ in 0..self.count {
self.create_engine(None);
}
}
pub fn start(&mut self) -> ServerResult<()> {
// Try recover from last shutdown.
let node_ids: Vec<u64> = self.engines.iter().map(|(&id, _)| id).collect();
for node_id in node_ids {
self.run_node(node_id)?;
}
// Try start new nodes.
for _ in 0..self.count - self.engines.len() {
let (router, system) =
create_raft_batch_system(&self.cfg.raft_store, &self.resource_manager);
self.create_engine(Some(router.clone()));
let engines = self.dbs.last().unwrap().clone();
let key_mgr = self.key_managers.last().unwrap().clone();
let store_meta = Arc::new(Mutex::new(StoreMeta::new(PENDING_MSG_CAP)));
let props = GroupProperties::default();
tikv_util::thread_group::set_properties(Some(props.clone()));
let mut sim = self.sim.wl();
let node_id = sim.run_node(
0,
self.cfg.clone(),
engines.clone(),
store_meta.clone(),
key_mgr.clone(),
router,
system,
&self.resource_manager,
)?;
self.group_props.insert(node_id, props);
self.engines.insert(node_id, engines);
self.store_metas.insert(node_id, store_meta);
self.key_managers_map.insert(node_id, key_mgr);
self.sst_workers_map
.insert(node_id, self.sst_workers.len() - 1);
}
Ok(())
}
pub fn compact_data(&self) {
for engine in self.engines.values() {
let db = &engine.kv;
db.compact_range_cf(
CF_DEFAULT,
None,
None,
ManualCompactionOptions::new(false, 1, false),
)
.unwrap();
}
}
pub fn flush_data(&self) {
for engine in self.engines.values() {
let db = &engine.kv;
db.flush_cf(CF_DEFAULT, true /* sync */).unwrap();
}
}
// Bootstrap the stores with fixed ids (like 1, 2, ..., 5), initialize the
// first region in all stores, then start the cluster.
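//
// A minimal usage sketch (hedged; `new_node_cluster` and `must_put` are
// assumed to be provided elsewhere in this test harness, not in this file):
//
//     let mut cluster = new_node_cluster(0, 3);
//     cluster.run();
//     cluster.must_put(b"k1", b"v1");
//     assert_eq!(cluster.get(b"k1"), Some(b"v1".to_vec()));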
pub fn run(&mut self) {
self.create_engines();
self.bootstrap_region().unwrap();
self.start().unwrap();
}
// Bootstrap the stores with fixed ids (like 1, 2, ..., 5), initialize the
// first region in store 1 only, then start the cluster.
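//
// A hedged sketch of a conf-change test (`must_add_peer` on `TestPdClient` is
// assumed to exist in the test PD client; `new_peer` is used elsewhere in this
// file):
//
//     let region_id = cluster.run_conf_change();
//     cluster.pd_client.must_add_peer(region_id, new_peer(2, 2));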
pub fn run_conf_change(&mut self) -> u64 {
self.create_engines();
let region_id = self.bootstrap_conf_change();
self.start().unwrap();
region_id
}
pub fn get_node_ids(&self) -> HashSet<u64> {
self.sim.rl().get_node_ids()
}
pub fn run_node(&mut self, node_id: u64) -> ServerResult<()> {
debug!("starting node {}", node_id);
let engines = self.engines[&node_id].clone();
let key_mgr = self.key_managers_map[&node_id].clone();
let (router, system) =
create_raft_batch_system(&self.cfg.raft_store, &self.resource_manager);
let mut cfg = self.cfg.clone();
if let Some(labels) = self.labels.get(&node_id) {
cfg.server.labels = labels.to_owned();
}
let store_meta = match self.store_metas.entry(node_id) {
MapEntry::Occupied(o) => {
let mut meta = o.get().lock().unwrap();
*meta = StoreMeta::new(PENDING_MSG_CAP);
o.get().clone()
}
MapEntry::Vacant(v) => v
.insert(Arc::new(Mutex::new(StoreMeta::new(PENDING_MSG_CAP))))
.clone(),
};
let props = GroupProperties::default();
self.group_props.insert(node_id, props.clone());
tikv_util::thread_group::set_properties(Some(props));
debug!("calling run node"; "node_id" => node_id);
// FIXME: rocksdb event listeners may not work, because we change the router.
self.sim.wl().run_node(
node_id,
cfg,
engines,
store_meta,
key_mgr,
router,
system,
&self.resource_manager,
)?;
debug!("node {} started", node_id);
Ok(())
}
pub fn stop_node(&mut self, node_id: u64) {
debug!("stopping node {}", node_id);
self.group_props[&node_id].mark_shutdown();
// Simulate the shutdown behavior of a real server shutdown. It's not enough to
// just mark the group above, as the current thread may also query the
// properties during shutdown.
let previous_prop = tikv_util::thread_group::current_properties();
tikv_util::thread_group::set_properties(Some(self.group_props[&node_id].clone()));
match self.sim.write() {
Ok(mut sim) => sim.stop_node(node_id),
Err(_) => safe_panic!("failed to acquire write lock."),
}
self.pd_client.shutdown_store(node_id);
debug!("node {} stopped", node_id);
tikv_util::thread_group::set_properties(previous_prop);
}
pub fn get_engine(&self, node_id: u64) -> RocksEngine {
self.engines[&node_id].kv.clone()
}
pub fn get_raft_engine(&self, node_id: u64) -> RaftTestEngine {
self.engines[&node_id].raft.clone()
}
pub fn get_all_engines(&self, node_id: u64) -> Engines<RocksEngine, RaftTestEngine> {
self.engines[&node_id].clone()
}
pub fn send_raft_msg(&mut self, msg: RaftMessage) -> Result<()> {
self.sim.wl().send_raft_msg(msg)
}
pub fn call_command_on_node(
&self,
node_id: u64,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
match self
.sim
.rl()
.call_command_on_node(node_id, request.clone(), timeout)
{
Err(e) => {
warn!("failed to call command {:?}: {:?}", request, e);
Err(e)
}
a => a,
}
}
pub fn read(
&self,
batch_id: Option<ThreadReadId>,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
match self.sim.wl().read(batch_id, request.clone(), timeout) {
Err(e) => {
warn!("failed to read {:?}: {:?}", request, e);
Err(e)
}
a => a,
}
}
pub fn call_command(
&self,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut is_read = false;
for req in request.get_requests() {
match req.get_cmd_type() {
CmdType::Get | CmdType::Snap | CmdType::ReadIndex => {
is_read = true;
}
_ => (),
}
}
let ret = if is_read {
self.sim.wl().read(None, request.clone(), timeout)
} else {
self.sim.rl().call_command(request.clone(), timeout)
};
match ret {
Err(e) => {
warn!("failed to call command {:?}: {:?}", request, e);
Err(e)
}
a => a,
}
}
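// A hedged sketch of sending a write to the current leader of a region
// (`new_put_cmd` is assumed to come from the shared test utilities; `new_request`
// is the same helper used by `request` further down):
//
//     let mut region = cluster.get_region(b"k1");
//     let req = new_request(
//         region.get_id(),
//         region.take_region_epoch(),
//         vec![new_put_cmd(b"k1", b"v1")],
//         false,
//     );
//     let resp = cluster
//         .call_command_on_leader(req, Duration::from_secs(5))
//         .unwrap();
//     assert!(!resp.get_header().has_error());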
pub fn call_command_on_leader(
&mut self,
mut request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let timer = Instant::now();
let region_id = request.get_header().get_region_id();
loop {
let leader = match self.leader_of_region(region_id) {
None => return Err(Error::NotLeader(region_id, None)),
Some(l) => l,
};
request.mut_header().set_peer(leader);
let resp = match self.call_command(request.clone(), timeout) {
e @ Err(_) => return e,
Ok(resp) => resp,
};
if self.refresh_leader_if_needed(&resp, region_id)
&& timer.saturating_elapsed() < timeout
{
warn!(
"{:?} is no longer leader, let's retry",
request.get_header().get_peer()
);
continue;
}
return Ok(resp);
}
}
fn valid_leader_id(&self, region_id: u64, leader_id: u64) -> bool {
let store_ids = match self.voter_store_ids_of_region(region_id) {
None => return false,
Some(ids) => ids,
};
let node_ids = self.sim.rl().get_node_ids();
store_ids.contains(&leader_id) && node_ids.contains(&leader_id)
}
fn voter_store_ids_of_region(&self, region_id: u64) -> Option<Vec<u64>> {
block_on(self.pd_client.get_region_by_id(region_id))
.unwrap()
.map(|region| {
region
.get_peers()
.iter()
.flat_map(|p| {
if p.get_role() != PeerRole::Learner {
Some(p.get_store_id())
} else {
None
}
})
.collect()
})
}
pub fn query_leader(
&self,
store_id: u64,
region_id: u64,
timeout: Duration,
) -> Option<metapb::Peer> {
// To get the region leader, we don't care about the real peer id, so use 0 instead.
let peer = new_peer(store_id, 0);
let find_leader = new_status_request(region_id, peer, new_region_leader_cmd());
let mut resp = match self.call_command(find_leader, timeout) {
Ok(resp) => resp,
Err(err) => {
error!(
"fail to get leader of region {} on store {}, error: {:?}",
region_id, store_id, err
);
return None;
}
};
let mut region_leader = resp.take_status_response().take_region_leader();
// NOTE: node id can't be 0.
if self.valid_leader_id(region_id, region_leader.get_leader().get_store_id()) {
Some(region_leader.take_leader())
} else {
None
}
}
pub fn leader_of_region(&mut self, region_id: u64) -> Option<metapb::Peer> {
let timer = Instant::now_coarse();
let timeout = Duration::from_secs(5);
let mut store_ids = None;
while timer.saturating_elapsed() < timeout {
match self.voter_store_ids_of_region(region_id) {
None => thread::sleep(Duration::from_millis(10)),
Some(ids) => {
store_ids = Some(ids);
break;
}
};
}
let store_ids = store_ids?;
if let Some(l) = self.leaders.get(&region_id) {
// leader may be stopped in some tests.
if self.valid_leader_id(region_id, l.get_store_id()) {
return Some(l.clone());
}
}
self.reset_leader_of_region(region_id);
let mut leader = None;
let mut leaders = HashMap::default();
let node_ids = self.sim.rl().get_node_ids();
// Some tests stop a node while PD still keeps its information,
// so such stores must be skipped here.
let alive_store_ids: Vec<_> = store_ids
.iter()
.filter(|id| node_ids.contains(id))
.cloned()
.collect();
while timer.saturating_elapsed() < timeout {
for store_id in &alive_store_ids {
let l = match self.query_leader(*store_id, region_id, Duration::from_secs(1)) {
None => continue,
Some(l) => l,
};
leaders
.entry(l.get_id())
.or_insert((l, vec![]))
.1
.push(*store_id);
}
if let Some((_, (l, c))) = leaders.iter().max_by_key(|(_, (_, c))| c.len()) {
// It may be a step down leader.
if c.contains(&l.get_store_id()) {
leader = Some(l.clone());
// Technically, a correct calculation should use two quorums when in a joint
// state. This is simplified here.
if c.len() > store_ids.len() / 2 {
break;
}
}
}
debug!("failed to detect leaders"; "leaders" => ?leaders, "store_ids" => ?store_ids);
sleep_ms(10);
leaders.clear();
}
if let Some(l) = leader {
self.leaders.insert(region_id, l);
}
self.leaders.get(&region_id).cloned()
}
pub fn check_regions_number(&self, len: u32) {
assert_eq!(self.pd_client.get_regions_number() as u32, len)
}
// For tests where a node has already bootstrapped the cluster with the first
// region, but another node requests bootstrap at the same time and gets
// `is_bootstrap == false`: add the region but don't set bootstrap to true.
pub fn add_first_region(&self) -> Result<()> {
let mut region = metapb::Region::default();
let region_id = self.pd_client.alloc_id().unwrap();
let peer_id = self.pd_client.alloc_id().unwrap();
region.set_id(region_id);
region.set_start_key(keys::EMPTY_KEY.to_vec());
region.set_end_key(keys::EMPTY_KEY.to_vec());
region.mut_region_epoch().set_version(INIT_EPOCH_VER);
region.mut_region_epoch().set_conf_ver(INIT_EPOCH_CONF_VER);
let peer = new_peer(peer_id, peer_id);
region.mut_peers().push(peer);
self.pd_client.add_region(&region);
Ok(())
}
/// Bootstraps multiple nodes with fixed node ids, like node 1, 2, ..., 5.
/// The first region (id 1) is in all stores, with peers 1, 2, ..., 5.
/// Peer 1 is on node 1 and store 1, etc.
///
/// Must be called after `create_engines`.
pub fn bootstrap_region(&mut self) -> Result<()> {
for (i, engines) in self.dbs.iter().enumerate() {
let id = i as u64 + 1;
self.engines.insert(id, engines.clone());
let store_meta = Arc::new(Mutex::new(StoreMeta::new(PENDING_MSG_CAP)));
self.store_metas.insert(id, store_meta);
self.key_managers_map
.insert(id, self.key_managers[i].clone());
self.sst_workers_map.insert(id, i);
}
let mut region = metapb::Region::default();
region.set_id(1);
region.set_start_key(keys::EMPTY_KEY.to_vec());
region.set_end_key(keys::EMPTY_KEY.to_vec());
region.mut_region_epoch().set_version(INIT_EPOCH_VER);
region.mut_region_epoch().set_conf_ver(INIT_EPOCH_CONF_VER);
for (&id, engines) in &self.engines {
let peer = new_peer(id, id);
region.mut_peers().push(peer.clone());
bootstrap_store(engines, self.id(), id).unwrap();
}
for engines in self.engines.values() {
prepare_bootstrap_cluster(engines, &region)?;
}
self.bootstrap_cluster(region);
Ok(())
}
// Return first region id.
pub fn bootstrap_conf_change(&mut self) -> u64 {
for (i, engines) in self.dbs.iter().enumerate() {
let id = i as u64 + 1;
self.engines.insert(id, engines.clone());
let store_meta = Arc::new(Mutex::new(StoreMeta::new(PENDING_MSG_CAP)));
self.store_metas.insert(id, store_meta);
self.key_managers_map
.insert(id, self.key_managers[i].clone());
self.sst_workers_map.insert(id, i);
}
for (&id, engines) in &self.engines {
bootstrap_store(engines, self.id(), id).unwrap();
}
let node_id = 1;
let region_id = 1;
let peer_id = 1;
let region = initial_region(node_id, region_id, peer_id);
prepare_bootstrap_cluster(&self.engines[&node_id], &region).unwrap();
self.bootstrap_cluster(region);
region_id
}
// This is only for fixed id test.
fn bootstrap_cluster(&mut self, region: metapb::Region) {
self.pd_client
.bootstrap_cluster(new_store(1, "".to_owned()), region)
.unwrap();
for id in self.engines.keys() {
let mut store = new_store(*id, "".to_owned());
if let Some(labels) = self.labels.get(id) {
for (key, value) in labels.iter() {
store.labels.push(StoreLabel {
key: key.clone(),
value: value.clone(),
..Default::default()
});
}
}
self.pd_client.put_store(store).unwrap();
}
}
pub fn add_label(&mut self, node_id: u64, key: &str, value: &str) {
self.labels
.entry(node_id)
.or_default()
.insert(key.to_owned(), value.to_owned());
}
pub fn add_new_engine(&mut self) -> u64 {
self.create_engine(None);
self.count += 1;
let node_id = self.count as u64;
let engines = self.dbs.last().unwrap().clone();
bootstrap_store(&engines, self.id(), node_id).unwrap();
self.engines.insert(node_id, engines);
let key_mgr = self.key_managers.last().unwrap().clone();
self.key_managers_map.insert(node_id, key_mgr);
self.sst_workers_map
.insert(node_id, self.sst_workers.len() - 1);
self.run_node(node_id).unwrap();
node_id
}
pub fn reset_leader_of_region(&mut self, region_id: u64) {
self.leaders.remove(&region_id);
}
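// A hedged sketch of checking that a quorum of stores has applied a write
// (`keys::data_key` prefixes a user key with the data prefix; `get_value`
// comes from the `Peekable` trait imported above):
//
//     cluster.assert_quorum(|engine| {
//         engine.get_value(&keys::data_key(b"k1")).unwrap().is_some()
//     });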
pub fn assert_quorum<F: FnMut(&RocksEngine) -> bool>(&self, mut condition: F) {
if self.engines.is_empty() {
return;
}
let half = self.engines.len() / 2;
let mut qualified_cnt = 0;
for (id, engines) in &self.engines {
if !condition(&engines.kv) {
debug!("store {} is not qualified yet.", id);
continue;
}
debug!("store {} is qualified", id);
qualified_cnt += 1;
if half < qualified_cnt {
return;
}
}
panic!(
"need at lease {} qualified stores, but only got {}",
half + 1,
qualified_cnt
);
}
pub fn shutdown(&mut self) {
debug!("about to shutdown cluster");
let keys = match self.sim.read() {
Ok(s) => s.get_node_ids(),
Err(_) => {
safe_panic!("failed to acquire read lock");
// Leave the resource to avoid double panic.
return;
}
};
for id in keys {
self.stop_node(id);
}
self.leaders.clear();
self.store_metas.clear();
for sst_worker in self.sst_workers.drain(..) {
sst_worker.stop_worker();
}
debug!("all nodes are shut down.");
}
// If the resp is a "not leader" error, fetch the real leader.
// Otherwise, reset or refresh the leader if needed.
// Returns whether the request should be retried.
fn refresh_leader_if_needed(&mut self, resp: &RaftCmdResponse, region_id: u64) -> bool {
if !is_error_response(resp) {
return false;
}
let err = resp.get_header().get_error();
if err
.get_message()
.contains("peer has not applied to current term")
{
// leader peer has not applied to current term
return true;
}
// If command is stale, leadership may have changed.
// EpochNotMatch is not checked as leadership is checked first in raftstore.
if err.has_stale_command() {
self.reset_leader_of_region(region_id);
return true;
}
if !err.has_not_leader() {
return false;
}
let err = err.get_not_leader();
if !err.has_leader() {
self.reset_leader_of_region(region_id);
return true;
}
self.leaders.insert(region_id, err.get_leader().clone());
true
}
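// A hedged sketch of issuing a read through the retry loop below
// (`new_get_cf_cmd` is the same helper used by `get_impl` further down):
//
//     let resp = cluster.request(
//         b"k1",
//         vec![new_get_cf_cmd(CF_DEFAULT, b"k1")],
//         true,
//         Duration::from_secs(5),
//     );
//     assert!(!resp.get_header().has_error());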
pub fn request(
&mut self,
key: &[u8],
reqs: Vec<Request>,
read_quorum: bool,
timeout: Duration,
) -> RaftCmdResponse {
let timer = Instant::now();
let mut tried_times = 0;
// At least retry once.
while tried_times < 2 || timer.saturating_elapsed() < timeout {
tried_times += 1;
let mut region = self.get_region(key);
let region_id = region.get_id();
let req = new_request(
region_id,
region.take_region_epoch(),
reqs.clone(),
read_quorum,
);
let result = self.call_command_on_leader(req, timeout);
let resp = match result {
e @ Err(Error::Timeout(_))
| e @ Err(Error::NotLeader(..))
| e @ Err(Error::StaleCommand) => {
warn!("call command failed, retry it"; "err" => ?e);
sleep_ms(100);
continue;
}
Err(e) => panic!("call command failed {:?}", e),
Ok(resp) => resp,
};
if resp.get_header().get_error().has_epoch_not_match() {
warn!("seems split, let's retry");
sleep_ms(100);
continue;
}
if resp
.get_header()
.get_error()
.get_message()
.contains("merging mode")
{
warn!("seems waiting for merge, let's retry");
sleep_ms(100);
continue;
}
return resp;
}
panic!("request timeout");
}
// Get region when the `filter` returns true.
pub fn get_region_with<F>(&self, key: &[u8], filter: F) -> metapb::Region
where
F: Fn(&metapb::Region) -> bool,
{
for _ in 0..100 {
if let Ok(region) = self.pd_client.get_region(key) {
if filter(&region) {
return region;
}
}
// We may hit a range gap right after a split, so retry
// getting the region again.
sleep_ms(20);
}
panic!("find no region for {}", log_wrappers::hex_encode_upper(key));
}
pub fn get_region(&self, key: &[u8]) -> metapb::Region {
self.get_region_with(key, |_| true)
}
pub fn get_region_id(&self, key: &[u8]) -> u64 {
self.get_region(key).get_id()
}
pub fn get_down_peers(&self) -> HashMap<u64, pdpb::PeerStats> {
self.pd_client.get_down_peers()
}
pub fn get(&mut self, key: &[u8]) -> Option<Vec<u8>> {
self.get_impl(CF_DEFAULT, key, false)
}
pub fn get_cf(&mut self, cf: &str, key: &[u8]) -> Option<Vec<u8>> {
self.get_impl(cf, key, false)
}
pub fn must_get(&mut self, key: &[u8]) -> Option<Vec<u8>> {
self.get_impl(CF_DEFAULT, key, true)
}
fn get_impl(&mut self, cf: &str, key: &[u8], read_quorum: bool) -> Option<Vec<u8>> {
let mut resp = self.request(
key,
vec![new_get_cf_cmd(cf, key)],
read_quorum,
Duration::from_secs(5),
);
if resp.get_header().has_error() {
panic!("response {:?} has error", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
if resp.get_responses()[0].has_get() {
Some(resp.mut_responses()[0].mut_get().take_value())
} else {
None
}
}
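// A hedged sketch of driving an async write and waiting on the returned future
// (`block_on` is imported above; `new_request` and `new_put_cmd` are assumed to
// come from the shared test utilities):
//
//     let mut region = cluster.get_region(b"k1");
//     let req = new_request(
//         region.get_id(),
//         region.take_region_epoch(),
//         vec![new_put_cmd(b"k1", b"v1")],
//         false,
//     );
//     let resp = block_on(cluster.async_request(req).unwrap());
//     assert!(!resp.get_header().has_error());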
pub fn async_request(
&mut self,
req: RaftCmdRequest,
) -> Result<BoxFuture<'static, RaftCmdResponse>> {
self.async_request_with_opts(req, Default::default())
}
pub fn async_request_with_opts(
&mut self,
mut req: RaftCmdRequest,
opts: RaftCmdExtraOpts,
) -> Result<BoxFuture<'static, RaftCmdResponse>> {
let region_id = req.get_header().get_region_id();
let leader = self.leader_of_region(region_id).unwrap();
req.mut_header().set_peer(leader.clone());
let (cb, mut rx) = make_cb(&req);
self.sim
.rl()
.async_command_on_node_with_opts(leader.get_store_id(), req, cb, opts)?;
Ok(Box::pin(async move {
let fut = rx.next();
fut.await.unwrap()
}))
}
pub fn async_exit_joint(