diff --git a/proxy_tests/proxy/shared/fast_add_peer/fp.rs b/proxy_tests/proxy/shared/fast_add_peer/fp.rs
index 5c5dbf5d240..b22805dfaa9 100644
--- a/proxy_tests/proxy/shared/fast_add_peer/fp.rs
+++ b/proxy_tests/proxy/shared/fast_add_peer/fp.rs
@@ -986,7 +986,7 @@ fn test_msgsnapshot_before_msgappend() {
         .unwrap();
 
     let mut t = 0;
-    while true {
+    loop {
         let mut buf = Vec::::new();
         cluster
             .get_engines(1)
diff --git a/proxy_tests/proxy/shared/replica_read.rs b/proxy_tests/proxy/shared/replica_read.rs
index d87a6ab87d2..438410d3a7b 100644
--- a/proxy_tests/proxy/shared/replica_read.rs
+++ b/proxy_tests/proxy/shared/replica_read.rs
@@ -210,6 +210,8 @@ fn test_read_index_normal() {
 
 /// If a read index request is received while region state is Applying,
 /// it could be handled correctly.
+/// NOTE: even if we can handle Applying properly here, the read index will be
+/// rejected in `resolveLocksAndWriteRegion` in TiFlash.
 #[test]
 fn test_read_index_applying() {
     // Initialize cluster
@@ -620,3 +622,102 @@ fn test_raft_message_can_advanve_max_ts() {
     cluster.shutdown();
     fail::remove("on_pre_write_apply_state")
 }
+
+#[test]
+fn test_read_index_diff_epoch() {
+    // Initialize cluster
+    let (mut cluster, pd_client) = new_mock_cluster(0, 2);
+    configure_for_lease_read(&mut cluster, Some(50), Some(10_000));
+    cluster.cfg.raft_store.raft_heartbeat_ticks = 1;
+    cluster.cfg.raft_store.raft_log_compact_sync_interval = ReadableDuration::millis(500);
+    pd_client.disable_default_operator();
+    disable_auto_gen_compact_log(&mut cluster);
+    // Otherwise will panic with `assert_eq!(apply_state, last_applied_state)`.
+    fail::cfg("on_pre_write_apply_state", "return(true)").unwrap();
+    // Set region and peers
+    let r1 = cluster.run_conf_change();
+    let p1 = new_peer(1, 1);
+    let p2 = new_learner_peer(2, 2);
+
+    cluster.pd_client.must_add_peer(r1, p2.clone());
+    cluster.must_put(b"k0", b"v");
+
+    let region = cluster.get_region(b"k0");
+    assert_eq!(cluster.leader_of_region(region.get_id()).unwrap(), p1);
+
+    check_key(&cluster, b"k0", b"v", Some(true), None, Some(vec![1, 2]));
+
+    for i in 1..5 {
+        cluster.must_put(format!("k{}0", i).as_bytes(), b"v");
+    }
+
+    check_key(&cluster, b"k40", b"v", Some(true), None, Some(vec![1, 2]));
+
+    cluster.must_split(&region, b"k20");
+
+    cluster.must_put(b"k11", b"v");
+    cluster.must_put(b"k31", b"v");
+    check_key(&cluster, b"k11", b"v", Some(true), None, Some(vec![1, 2]));
+    check_key(&cluster, b"k31", b"v", Some(true), None, Some(vec![1, 2]));
+
+    let waker = Waker::new();
+
+    let new_region = cluster.get_region(b"31");
+    let new_region_peer = new_region
+        .get_peers()
+        .iter()
+        .filter(|x| x.get_store_id() == 2)
+        .last()
+        .unwrap();
+
+    for (id, peer, f) in &[(2, p2, true), (2, new_region_peer.clone(), true)] {
+        iter_ffi_helpers(
+            &cluster,
+            Some(vec![*id]),
+            &mut |_, ffi_helper: &mut FFIHelperSet| {
+                assert_eq!(
+                    general_get_region_local_state(
+                        &ffi_helper.engine_store_server.engines.as_ref().unwrap().kv,
+                        r1
+                    )
+                    .unwrap()
+                    .get_state(),
+                    PeerState::Normal
+                );
+                let mut request = kvproto::kvrpcpb::ReadIndexRequest::default();
+
+                {
+                    let context = request.mut_context();
+                    context.set_region_id(region.get_id());
+                    context.set_peer(peer.clone());
+                    context.set_region_epoch(region.get_region_epoch().clone());
+                    request.set_start_ts(666);
+
+                    let mut range = kvproto::kvrpcpb::KeyRange::default();
+                    range.set_start_key(region.get_start_key().to_vec());
+                    range.set_end_key(region.get_end_key().to_vec());
+                    request.mut_ranges().push(range);
+
+                    debug!("make read index request {:?}", &request);
+                }
+                let w = if *f { Some(&waker) } else { None };
+                let resp = blocked_read_index(&request, &*ffi_helper.proxy_helper, w).unwrap();
+                assert_eq!(resp.get_read_index(), 0);
+                debug!("resp detail {:?}", resp);
+                // Epoch Not Match error
+                assert!(resp.has_region_error());
+            },
+        );
+    }
+
+    drop(waker);
+
+    {
+        assert!(!GC_MONITOR.is_empty());
+        assert!(GC_MONITOR.valid_clean());
+    }
+
+    cluster.shutdown();
+    fail::remove("on_pre_write_apply_state");
+    fail::remove("region_apply_snap");
+}