From bac5355e258bf8fd4a80ec4619495bbce3bdcd47 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adam=20Chuda=C5=9B?=
Date: Thu, 9 Jan 2025 00:00:28 +0100
Subject: [PATCH 1/7] Generalize check_state_shard_uid_mapping_after_resharding

---
 .../src/test_loop/tests/resharding_v3.rs      | 59 +++++++---------
 .../src/test_loop/utils/sharding.rs           | 22 ++++++
 .../src/test_loop/utils/trie_sanity.rs        | 68 ++++++++++++++++---
 3 files changed, 103 insertions(+), 46 deletions(-)

diff --git a/integration-tests/src/test_loop/tests/resharding_v3.rs b/integration-tests/src/test_loop/tests/resharding_v3.rs
index 95dacbf9e47..e389865b26d 100644
--- a/integration-tests/src/test_loop/tests/resharding_v3.rs
+++ b/integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -459,10 +459,8 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
         TrieSanityCheck::new(&clients, params.load_mem_tries_for_tracked_shards);
 
     let latest_block_height = Cell::new(0u64);
-    // Height of a block after resharding.
-    let new_layout_block_height = Cell::new(None);
-    // Height of an epoch after resharding.
-    let new_layout_epoch_height = Cell::new(None);
+    let resharding_block_hash = Cell::new(None);
+    let epoch_height_after_resharding = Cell::new(None);
     let success_condition = |test_loop_data: &mut TestLoopData| -> bool {
         params
             .loop_actions
@@ -485,56 +483,47 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
 
         let client = clients[client_index];
         let block_header = client.chain.get_block_header(&tip.last_block_hash).unwrap();
-        let shard_layout = client.epoch_manager.get_shard_layout(&tip.epoch_id).unwrap();
-        println!("Block: {:?} {} {:?}", tip.last_block_hash, tip.height, block_header.chunk_mask());
-        println!("Shard IDs: {:?}", shard_layout.shard_ids().collect_vec());
 
         // Check that all chunks are included.
         if params.all_chunks_expected && params.chunk_ranges_to_drop.is_empty() {
             assert!(block_header.chunk_mask().iter().all(|chunk_bit| *chunk_bit));
         }
 
-        let shard_layout = client.epoch_manager.get_shard_layout(&tip.epoch_id).unwrap();
-        println!(
-            "new block #{} shards: {:?} chunk mask {:?}",
-            tip.height,
-            shard_layout.shard_ids().collect_vec(),
-            block_header.chunk_mask().to_vec()
-        );
-
         trie_sanity_check.assert_state_sanity(&clients, expected_num_shards);
 
-        let epoch_height =
-            client.epoch_manager.get_epoch_height_from_prev_block(&tip.prev_block_hash).unwrap();
-
-        // Return false if we have not yet passed an epoch with increased number of shards.
-        if new_layout_epoch_height.get().is_none() {
-            assert!(epoch_height < 6);
-            let prev_epoch_id = client
-                .epoch_manager
-                .get_prev_epoch_id_from_prev_block(&tip.prev_block_hash)
-                .unwrap();
-            let epoch_config = client.epoch_manager.get_epoch_config(&prev_epoch_id).unwrap();
+        let epoch_id =
+            client.epoch_manager.get_epoch_id_from_prev_block(&tip.prev_block_hash).unwrap();
+        let epoch_info = client.epoch_manager.get_epoch_info(&epoch_id).unwrap();
+        let epoch_height = epoch_info.epoch_height();
+
+        // Return false if we have not resharded yet.
+        if epoch_height_after_resharding.get().is_none() {
+            assert!(epoch_height < 5);
+            let epoch_config = client.epoch_manager.get_epoch_config(&epoch_id).unwrap();
             if epoch_config.shard_layout.num_shards() != expected_num_shards {
                 return false;
             }
-            // Just passed an epoch with increased number of shards.
-            new_layout_block_height.set(Some(latest_block_height.get()));
-            new_layout_epoch_height.set(Some(epoch_height));
+            // Just resharded.
+            resharding_block_hash.set(Some(tip.prev_block_hash));
+            epoch_height_after_resharding.set(Some(epoch_height));
             // Assert that we will have a chance for gc to kick in before the test is over.
             assert!(epoch_height + GC_NUM_EPOCHS_TO_KEEP < TESTLOOP_NUM_EPOCHS_TO_WAIT);
             println!("State after resharding:");
             print_and_assert_shard_accounts(&clients, &tip);
         }
 
-        check_state_shard_uid_mapping_after_resharding(
-            &client,
-            parent_shard_uid,
-            params.allow_negative_refcount,
-        );
+        for client in clients {
+            check_state_shard_uid_mapping_after_resharding(
+                client,
+                &tip.prev_block_hash,
+                &resharding_block_hash.get().unwrap(),
+                parent_shard_uid,
+                params.allow_negative_refcount,
+            );
+        }
 
         // Return false if garbage collection window has not passed yet since resharding.
-        if epoch_height <= new_layout_epoch_height.get().unwrap() + GC_NUM_EPOCHS_TO_KEEP {
+        if epoch_height <= epoch_height_after_resharding.get().unwrap() + GC_NUM_EPOCHS_TO_KEEP {
             return false;
         }
         for loop_action in &params.loop_actions {

diff --git a/integration-tests/src/test_loop/utils/sharding.rs b/integration-tests/src/test_loop/utils/sharding.rs
index 8f377f122b1..9db148659e2 100644
--- a/integration-tests/src/test_loop/utils/sharding.rs
+++ b/integration-tests/src/test_loop/utils/sharding.rs
@@ -118,3 +118,25 @@ pub fn shard_was_split(shard_layout: &ShardLayout, shard_id: ShardId) -> bool {
     };
     parent != shard_id
 }
+
+pub fn get_tracked_shards_from_prev_block(
+    client: &Client,
+    prev_block_hash: &CryptoHash,
+) -> Vec<ShardUId> {
+    let account_id =
+        client.validator_signer.get().map(|validator| validator.validator_id().clone());
+    let mut tracked_shards = vec![];
+    for shard_uid in
+        client.epoch_manager.get_shard_layout_from_prev_block(prev_block_hash).unwrap().shard_uids()
+    {
+        if client.shard_tracker.care_about_shard(
+            account_id.as_ref(),
+            prev_block_hash,
+            shard_uid.shard_id(),
+            true,
+        ) {
+            tracked_shards.push(shard_uid);
+        }
+    }
+    tracked_shards
+}

diff --git a/integration-tests/src/test_loop/utils/trie_sanity.rs b/integration-tests/src/test_loop/utils/trie_sanity.rs
index 31acd1e32cb..fdad582ebdf 100644
--- a/integration-tests/src/test_loop/utils/trie_sanity.rs
+++ b/integration-tests/src/test_loop/utils/trie_sanity.rs
@@ -1,5 +1,7 @@
 use super::sharding::shard_was_split;
-use crate::test_loop::utils::sharding::{client_tracking_shard, get_memtrie_for_shard};
+use crate::test_loop::utils::sharding::{
+    client_tracking_shard, get_memtrie_for_shard, get_tracked_shards_from_prev_block,
+};
 use borsh::BorshDeserialize;
 use itertools::Itertools;
 use near_chain::types::Tip;
@@ -10,6 +12,7 @@ use near_primitives::shard_layout::ShardLayout;
 use near_primitives::state::FlatStateValue;
 use near_primitives::types::{AccountId, EpochId, NumShards};
 use near_primitives::version::PROTOCOL_VERSION;
+use near_store::adapter::trie_store::get_shard_uid_mapping;
 use near_store::adapter::StoreAdapter;
 use near_store::db::refcount::decode_value_with_rc;
 use near_store::flat::FlatStorageStatus;
@@ -340,6 +343,8 @@ fn should_assert_state_sanity(
 
 /// Asserts that all parent shard State is accessible via parent and children shards.
 pub fn check_state_shard_uid_mapping_after_resharding(
     client: &Client,
+    prev_block_hash: &CryptoHash,
+    resharding_block_hash: &CryptoHash,
     parent_shard_uid: ShardUId,
     allow_negative_refcount: bool,
 ) {
@@ -350,17 +355,35 @@ pub fn check_state_shard_uid_mapping_after_resharding(
         epoch_config.shard_layout.get_children_shards_uids(parent_shard_uid.shard_id()).unwrap();
     assert_eq!(children_shard_uids.len(), 2);
 
-    let store = client.chain.chain_store.store().trie_store();
-    let mut checked_any = false;
-    for kv in store.store().iter_raw_bytes(DBCol::State) {
+    // Currently tracked shards.
+    let tracked_shards = get_tracked_shards_from_prev_block(client, prev_block_hash);
+    // ShardUId mappings (different than map to itself) that we have stored in DB.
+    let mut shard_uid_mapping = HashMap::new();
+    // Currently tracked children shards that are mapped to an ancestor.
+    let mut tracked_mapped_children = vec![];
+    let store = client.chain.chain_store.store();
+    for child_shard_uid in &children_shard_uids {
+        let mapped_shard_uid = get_shard_uid_mapping(store, *child_shard_uid);
+        if &mapped_shard_uid == child_shard_uid {
+            continue;
+        }
+        shard_uid_mapping.insert(child_shard_uid, mapped_shard_uid);
+        if tracked_shards.contains(child_shard_uid) {
+            tracked_mapped_children.push(*child_shard_uid);
+        }
+    }
+
+    // Whether we found any value in DB for which we could test the mapping.
+    let mut checked_any_key = false;
+    let trie_store = store.trie_store();
+    for kv in store.iter_raw_bytes(DBCol::State) {
         let (key, value) = kv.unwrap();
         let shard_uid = ShardUId::try_from_slice(&key[0..8]).unwrap();
         // Just after resharding, no State data must be keyed using children ShardUIds.
-        assert!(!children_shard_uids.contains(&shard_uid));
+        assert!(!shard_uid_mapping.contains_key(&shard_uid));
         if shard_uid != parent_shard_uid {
             continue;
         }
-        checked_any = true;
         let node_hash = CryptoHash::try_from_slice(&key[8..]).unwrap();
         let (value, rc) = decode_value_with_rc(&value);
         // It is possible we have delayed receipts leftovers on disk,
@@ -374,14 +397,37 @@ pub fn check_state_shard_uid_mapping_after_resharding(
             assert!(value.is_none());
             continue;
         }
-        let parent_value = store.get(parent_shard_uid, &node_hash);
-        // Parent shard data must still be accessible using parent ShardUId.
+        let parent_value = trie_store.get(parent_shard_uid, &node_hash);
+        // Sanity check: parent shard data must still be accessible using Trie interface and parent ShardUId.
         assert_eq!(&parent_value.unwrap()[..], value.unwrap());
+
         // All parent shard data is available via both children shards.
-        for child_shard_uid in &children_shard_uids {
-            let child_value = store.get(*child_shard_uid, &node_hash);
+        for child_shard_uid in &tracked_mapped_children {
+            let child_value = trie_store.get(*child_shard_uid, &node_hash);
             assert_eq!(&child_value.unwrap()[..], value.unwrap());
         }
+        checked_any_key = true;
+    }
+    assert!(checked_any_key);
+
+    let shards_tracked_after_resharding =
+        get_tracked_shards_from_prev_block(client, resharding_block_hash);
+    // Sanity checks if the node tracks all shards (e.g. it is RPC node).
+    if !client.config.tracked_shards.is_empty() {
+        assert_eq!(tracked_mapped_children.len(), 2);
+        assert_eq!(
+            shards_tracked_after_resharding.len(),
+            epoch_config.shard_layout.num_shards() as usize
+        );
+    }
+    // If any child shard was tracked after resharding, it means the node had to split the parent shard.
+    if children_shard_uids
+        .iter()
+        .any(|child_shard_uid| shards_tracked_after_resharding.contains(child_shard_uid))
+    {
+        assert_eq!(shard_uid_mapping.len(), 2);
+    } else {
+        // Otherwise, no mapping was set and no shard State would be mapped.
+        assert!(shard_uid_mapping.is_empty());
     }
-    assert!(checked_any);
 }

From fa8e29e23c8101664359deac81ced59b21d34f98 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adam=20Chuda=C5=9B?=
Date: Thu, 9 Jan 2025 13:28:57 +0100
Subject: [PATCH 2/7] address review comments

---
 .../src/test_loop/tests/resharding_v3.rs      | 45 ++++++++++++++++---
 .../src/test_loop/utils/sharding.rs           | 17 ++++---
 .../src/test_loop/utils/trie_sanity.rs        | 23 ++++++----
 3 files changed, 63 insertions(+), 22 deletions(-)

diff --git a/integration-tests/src/test_loop/tests/resharding_v3.rs b/integration-tests/src/test_loop/tests/resharding_v3.rs
index e389865b26d..c3b42a959a2 100644
--- a/integration-tests/src/test_loop/tests/resharding_v3.rs
+++ b/integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -483,6 +483,15 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
 
         let client = clients[client_index];
         let block_header = client.chain.get_block_header(&tip.last_block_hash).unwrap();
+        let shard_layout = client.epoch_manager.get_shard_layout(&tip.epoch_id).unwrap();
+
+        println!(
+            "new block #{} shards: {:?} chunk mask {:?} block hash {}",
+            tip.height,
+            shard_layout.shard_ids().collect_vec(),
+            block_header.chunk_mask().to_vec(),
+            tip.last_block_hash,
+        );
 
         // Check that all chunks are included.
         if params.all_chunks_expected && params.chunk_ranges_to_drop.is_empty() {
@@ -491,16 +500,13 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
 
         trie_sanity_check.assert_state_sanity(&clients, expected_num_shards);
 
-        let epoch_id =
-            client.epoch_manager.get_epoch_id_from_prev_block(&tip.prev_block_hash).unwrap();
-        let epoch_info = client.epoch_manager.get_epoch_info(&epoch_id).unwrap();
-        let epoch_height = epoch_info.epoch_height();
+        let epoch_height =
+            client.epoch_manager.get_epoch_height_from_prev_block(&tip.prev_block_hash).unwrap();
 
         // Return false if we have not resharded yet.
         if epoch_height_after_resharding.get().is_none() {
             assert!(epoch_height < 5);
-            let epoch_config = client.epoch_manager.get_epoch_config(&epoch_id).unwrap();
-            if epoch_config.shard_layout.num_shards() != expected_num_shards {
+            if shard_layout.num_shards() != expected_num_shards {
                 return false;
             }
             // Just resharded.
@@ -515,7 +521,6 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
         for client in clients {
             check_state_shard_uid_mapping_after_resharding(
                 client,
-                &tip.prev_block_hash,
                 &resharding_block_hash.get().unwrap(),
                 parent_shard_uid,
                 params.allow_negative_refcount,
@@ -590,6 +595,32 @@ fn test_resharding_v3_state_cleanup() {
     );
 }
 
+#[test]
+fn test_resharding_v3_do_not_track_children_after_resharding() {
+    // Track parent shard before resharding, but do not track any child shard after resharding.
+    let account_in_stable_shard: AccountId = "account0".parse().unwrap();
+    let split_boundary_account: AccountId = NEW_BOUNDARY_ACCOUNT.parse().unwrap();
+    let base_shard_layout = get_base_shard_layout(DEFAULT_SHARD_LAYOUT_VERSION);
+    let new_shard_layout =
+        ShardLayout::derive_shard_layout(&base_shard_layout, split_boundary_account.clone());
+    let parent_shard_id = base_shard_layout.account_id_to_shard_id(&split_boundary_account);
+    let unrelated_shard_id = new_shard_layout.account_id_to_shard_id(&account_in_stable_shard);
+
+    let tracked_shard_sequence =
+        vec![parent_shard_id, parent_shard_id, unrelated_shard_id, unrelated_shard_id];
+    let num_clients = 8;
+    let tracked_shard_schedule = TrackedShardSchedule {
+        client_index: (num_clients - 1) as usize,
+        schedule: shard_sequence_to_schedule(tracked_shard_sequence),
+    };
+    test_resharding_v3_base(
+        TestReshardingParametersBuilder::default()
+            .num_clients(num_clients)
+            .tracked_shard_schedule(Some(tracked_shard_schedule.clone()))
+            .build(),
+    );
+}
+
 #[test]
 fn test_resharding_v3_track_all_shards() {
     test_resharding_v3_base(

diff --git a/integration-tests/src/test_loop/utils/sharding.rs b/integration-tests/src/test_loop/utils/sharding.rs
index 9db148659e2..0da5f7a23c9 100644
--- a/integration-tests/src/test_loop/utils/sharding.rs
+++ b/integration-tests/src/test_loop/utils/sharding.rs
@@ -123,14 +123,14 @@ pub fn get_tracked_shards_from_prev_block(
     client: &Client,
     prev_block_hash: &CryptoHash,
 ) -> Vec<ShardUId> {
-    let account_id =
-        client.validator_signer.get().map(|validator| validator.validator_id().clone());
+    let signer = client.validator_signer.get();
+    let account_id = signer.as_ref().map(|s| s.validator_id());
+    let shard_layout =
+        client.epoch_manager.get_shard_layout_from_prev_block(prev_block_hash).unwrap();
     let mut tracked_shards = vec![];
-    for shard_uid in
-        client.epoch_manager.get_shard_layout_from_prev_block(prev_block_hash).unwrap().shard_uids()
-    {
+    for shard_uid in shard_layout.shard_uids() {
         if client.shard_tracker.care_about_shard(
-            account_id.as_ref(),
+            account_id,
             prev_block_hash,
             shard_uid.shard_id(),
             true,
         ) {
             tracked_shards.push(shard_uid);
         }
     }
     tracked_shards
 }
+
+pub fn get_tracked_shards(client: &Client, block_hash: &CryptoHash) -> Vec<ShardUId> {
+    let block_header = client.chain.get_block_header(block_hash).unwrap();
+    get_tracked_shards_from_prev_block(client, block_header.prev_hash())
+}

diff --git a/integration-tests/src/test_loop/utils/trie_sanity.rs b/integration-tests/src/test_loop/utils/trie_sanity.rs
index fdad582ebdf..1a9d76949ac 100644
--- a/integration-tests/src/test_loop/utils/trie_sanity.rs
+++ b/integration-tests/src/test_loop/utils/trie_sanity.rs
@@ -1,6 +1,7 @@
 use super::sharding::shard_was_split;
 use crate::test_loop::utils::sharding::{
-    client_tracking_shard, get_memtrie_for_shard, get_tracked_shards_from_prev_block,
+    client_tracking_shard, get_memtrie_for_shard, get_tracked_shards,
+    get_tracked_shards_from_prev_block,
 };
 use borsh::BorshDeserialize;
 use itertools::Itertools;
@@ -344,20 +345,19 @@ fn should_assert_state_sanity(
 /// Asserts that all parent shard State is accessible via parent and children shards.
 pub fn check_state_shard_uid_mapping_after_resharding(
     client: &Client,
-    prev_block_hash: &CryptoHash,
     resharding_block_hash: &CryptoHash,
     parent_shard_uid: ShardUId,
     allow_negative_refcount: bool,
 ) {
     let tip = client.chain.head().unwrap();
     let epoch_id = tip.epoch_id;
-    let epoch_config = client.epoch_manager.get_epoch_config(&epoch_id).unwrap();
+    let shard_layout = client.epoch_manager.get_shard_layout(&epoch_id).unwrap();
     let children_shard_uids =
-        epoch_config.shard_layout.get_children_shards_uids(parent_shard_uid.shard_id()).unwrap();
+        shard_layout.get_children_shards_uids(parent_shard_uid.shard_id()).unwrap();
     assert_eq!(children_shard_uids.len(), 2);
 
     // Currently tracked shards.
-    let tracked_shards = get_tracked_shards_from_prev_block(client, prev_block_hash);
+    let tracked_shards = get_tracked_shards_from_prev_block(client, &tip.prev_block_hash);
     // ShardUId mappings (different than map to itself) that we have stored in DB.
     let mut shard_uid_mapping = HashMap::new();
     // Currently tracked children shards that are mapped to an ancestor.
     let mut tracked_mapped_children = vec![];
@@ -410,15 +410,16 @@ pub fn check_state_shard_uid_mapping_after_resharding(
     }
     assert!(checked_any_key);
 
+    let shards_tracked_before_resharding = get_tracked_shards(client, resharding_block_hash);
+    let tracked_parent_before_resharding =
+        shards_tracked_before_resharding.contains(&parent_shard_uid);
     let shards_tracked_after_resharding =
         get_tracked_shards_from_prev_block(client, resharding_block_hash);
+
     // Sanity checks if the node tracks all shards (e.g. it is RPC node).
     if !client.config.tracked_shards.is_empty() {
         assert_eq!(tracked_mapped_children.len(), 2);
-        assert_eq!(
-            shards_tracked_after_resharding.len(),
-            epoch_config.shard_layout.num_shards() as usize
-        );
+        assert_eq!(shards_tracked_after_resharding.len(), shard_layout.num_shards() as usize,);
     }
     // If any child shard was tracked after resharding, it means the node had to split the parent shard.
     if children_shard_uids
         .iter()
         .any(|child_shard_uid| shards_tracked_after_resharding.contains(child_shard_uid))
     {
         assert_eq!(shard_uid_mapping.len(), 2);
+    } else if tracked_parent_before_resharding {
+        // Parent was tracked before resharding, but no child was tracked after resharding.
+        // TODO(resharding) Consider not resharding in such case. If fixed, the assert below should change from 2 to 0.
+        assert_eq!(shard_uid_mapping.len(), 2);
     } else {
         // Otherwise, no mapping was set and no shard State would be mapped.
         assert!(shard_uid_mapping.is_empty());
     }

From c617d072c440a62c93d14096e799a2c24601c17d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adam=20Chuda=C5=9B?=
Date: Thu, 9 Jan 2025 14:34:01 +0100
Subject: [PATCH 3/7] clippy

---
 integration-tests/src/test_loop/tests/resharding_v3.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/integration-tests/src/test_loop/tests/resharding_v3.rs b/integration-tests/src/test_loop/tests/resharding_v3.rs
index c3b42a959a2..c6b39c8eff1 100644
--- a/integration-tests/src/test_loop/tests/resharding_v3.rs
+++ b/integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -616,7 +616,7 @@ fn test_resharding_v3_do_not_track_children_after_resharding() {
     test_resharding_v3_base(
         TestReshardingParametersBuilder::default()
             .num_clients(num_clients)
-            .tracked_shard_schedule(Some(tracked_shard_schedule.clone()))
+            .tracked_shard_schedule(Some(tracked_shard_schedule))
             .build(),
     );
 }

From 73d28fd7fc0e221ed3c963f85ffd2f8e727f33c2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adam=20Chuda=C5=9B?=
Date: Fri, 10 Jan 2025 11:17:48 +0100
Subject: [PATCH 4/7] add test, extend test duration

---
 .../src/test_loop/tests/resharding_v3.rs      | 49 +++++++++++++++++--
 1 file changed, 44 insertions(+), 5 deletions(-)

diff --git a/integration-tests/src/test_loop/tests/resharding_v3.rs b/integration-tests/src/test_loop/tests/resharding_v3.rs
index c6b39c8eff1..f62cb493e8d 100644
--- a/integration-tests/src/test_loop/tests/resharding_v3.rs
+++ b/integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -50,7 +50,7 @@ const INCREASED_EPOCH_LENGTH: u64 = 8;
 const GC_NUM_EPOCHS_TO_KEEP: u64 = 3;
 
 /// Maximum number of epochs under which the test should finish.
-const TESTLOOP_NUM_EPOCHS_TO_WAIT: u64 = 8;
+const TESTLOOP_NUM_EPOCHS_TO_WAIT: u64 = 10;
 
 /// Default shard layout version used in resharding tests.
 const DEFAULT_SHARD_LAYOUT_VERSION: u64 = 2;
@@ -135,7 +135,7 @@ struct TestReshardingParameters {
 
 impl TestReshardingParametersBuilder {
     fn build(self) -> TestReshardingParameters {
         // Give enough time for GC to kick in after resharding.
-        assert!(GC_NUM_EPOCHS_TO_KEEP + 2 < TESTLOOP_NUM_EPOCHS_TO_WAIT);
+        assert!(GC_NUM_EPOCHS_TO_KEEP + 3 < TESTLOOP_NUM_EPOCHS_TO_WAIT);
         let epoch_length = self.epoch_length.unwrap_or(DEFAULT_EPOCH_LENGTH);
         let tracked_shard_schedule = self.tracked_shard_schedule.unwrap_or(None);
@@ -486,11 +486,12 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
         let shard_layout = client.epoch_manager.get_shard_layout(&tip.epoch_id).unwrap();
 
         println!(
-            "new block #{} shards: {:?} chunk mask {:?} block hash {}",
+            "new block #{} shards: {:?} chunk mask {:?} block hash {} epoch id {:?}",
             tip.height,
             shard_layout.shard_ids().collect_vec(),
             block_header.chunk_mask().to_vec(),
             tip.last_block_hash,
+            tip.epoch_id.0,
         );
 
         // Check that all chunks are included.
@@ -528,7 +529,7 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
         }
 
         // Return false if garbage collection window has not passed yet since resharding.
-        if epoch_height <= epoch_height_after_resharding.get().unwrap() + GC_NUM_EPOCHS_TO_KEEP {
+        if epoch_height <= TESTLOOP_NUM_EPOCHS_TO_WAIT {
             return false;
         }
         for loop_action in &params.loop_actions {
@@ -539,7 +540,7 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
 
     env.test_loop.run_until(
         success_condition,
-        // Give enough time to produce ~TESTLOOP_NUM_EPOCHS_TO_WAIT epochs.
+        // Give enough time to produce TESTLOOP_NUM_EPOCHS_TO_WAIT epochs.
         Duration::seconds((TESTLOOP_NUM_EPOCHS_TO_WAIT * params.epoch_length) as i64),
     );
     let client = &env.test_loop.data.get(&client_handles[client_index]).client;
@@ -621,6 +622,40 @@ fn test_resharding_v3_do_not_track_children_after_resharding() {
     );
 }
 
+#[test]
+fn test_resharding_v3_stop_track_child_for_2_epochs() {
+    // Track parent shard before resharding, and a child shard after resharding.
+    // Then do not track the child for 2 epochs and start tracking it again.
+    let account_in_stable_shard: AccountId = "account0".parse().unwrap();
+    let split_boundary_account: AccountId = NEW_BOUNDARY_ACCOUNT.parse().unwrap();
+    let base_shard_layout = get_base_shard_layout(DEFAULT_SHARD_LAYOUT_VERSION);
+    let new_shard_layout =
+        ShardLayout::derive_shard_layout(&base_shard_layout, split_boundary_account.clone());
+    let parent_shard_id = base_shard_layout.account_id_to_shard_id(&split_boundary_account);
+    let child_shard_id = new_shard_layout.account_id_to_shard_id(&split_boundary_account);
+    let unrelated_shard_id = new_shard_layout.account_id_to_shard_id(&account_in_stable_shard);
+
+    let tracked_shard_sequence = vec![
+        parent_shard_id,
+        parent_shard_id,
+        child_shard_id,
+        unrelated_shard_id,
+        unrelated_shard_id,
+        child_shard_id,
+    ];
+    let num_clients = 8;
+    let tracked_shard_schedule = TrackedShardSchedule {
+        client_index: (num_clients - 1) as usize,
+        schedule: shard_sequence_to_schedule(tracked_shard_sequence),
+    };
+    test_resharding_v3_base(
+        TestReshardingParametersBuilder::default()
+            .num_clients(num_clients)
+            .tracked_shard_schedule(Some(tracked_shard_schedule))
+            .build(),
+    );
+}
+
 #[test]
 fn test_resharding_v3_track_all_shards() {
     test_resharding_v3_base(
@@ -712,6 +747,8 @@ fn test_resharding_v3_double_sign_resharding_block() {
 }
 
 #[test]
+// TODO(resharding): fix nearcore and un-ignore this test
+#[ignore]
 fn test_resharding_v3_shard_shuffling() {
     let params = TestReshardingParametersBuilder::default()
         .shuffle_shard_assignment_for_chunk_producers(true)
@@ -752,6 +789,8 @@ fn test_resharding_v3_shard_shuffling_untrack_then_track() {
 }
 
 #[test]
+// TODO(resharding): fix nearcore and un-ignore this test
+#[ignore]
 fn test_resharding_v3_shard_shuffling_intense() {
     let chunk_ranges_to_drop = HashMap::from([(0, -1..2), (1, -3..0), (2, -3..3), (3, 0..1)]);
     let params = TestReshardingParametersBuilder::default()

From c22af859bbf978247a5b2fe99593b5ab6d40d426 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adam=20Chuda=C5=9B?=
Date: Fri, 10 Jan 2025 15:35:58 +0100
Subject: [PATCH 5/7] ignore

---
 integration-tests/src/test_loop/tests/resharding_v3.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/integration-tests/src/test_loop/tests/resharding_v3.rs b/integration-tests/src/test_loop/tests/resharding_v3.rs
index f62cb493e8d..8106afb4b48 100644
--- a/integration-tests/src/test_loop/tests/resharding_v3.rs
+++ b/integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -1020,6 +1020,8 @@ fn test_resharding_v3_slower_post_processing_tasks() {
 
 #[test]
 #[cfg_attr(not(feature = "test_features"), ignore)]
+// TODO(resharding): fix nearcore and un-ignore this test
+#[ignore]
 fn test_resharding_v3_shard_shuffling_slower_post_processing_tasks() {
     let params = TestReshardingParametersBuilder::default()
         .shuffle_shard_assignment_for_chunk_producers(true)

From e68f8baff2bfd0dfa22657dca98de2bea9ba7e1e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adam=20Chuda=C5=9B?=
Date: Fri, 10 Jan 2025 15:49:43 +0100
Subject: [PATCH 6/7] fix

---
 integration-tests/src/test_loop/tests/resharding_v3.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/integration-tests/src/test_loop/tests/resharding_v3.rs b/integration-tests/src/test_loop/tests/resharding_v3.rs
index 8106afb4b48..e70533da869 100644
--- a/integration-tests/src/test_loop/tests/resharding_v3.rs
+++ b/integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -1019,9 +1019,9 @@ fn test_resharding_v3_slower_post_processing_tasks() {
 }
 
 #[test]
-#[cfg_attr(not(feature = "test_features"), ignore)]
-// TODO(resharding): fix nearcore and un-ignore this test
+// TODO(resharding): fix nearcore and un-ignore this test, then uncomment the conditional ignore below
 #[ignore]
+//#[cfg_attr(not(feature = "test_features"), ignore)]
 fn test_resharding_v3_shard_shuffling_slower_post_processing_tasks() {
     let params = TestReshardingParametersBuilder::default()
         .shuffle_shard_assignment_for_chunk_producers(true)

From 5459d2504d5fc7c69a860ce3cb1f4cc1f2187b76 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adam=20Chuda=C5=9B?=
Date: Fri, 10 Jan 2025 16:38:47 +0100
Subject: [PATCH 7/7] revert ignores

---
 .../src/test_loop/tests/resharding_v3.rs      | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/integration-tests/src/test_loop/tests/resharding_v3.rs b/integration-tests/src/test_loop/tests/resharding_v3.rs
index e70533da869..c9413ba5841 100644
--- a/integration-tests/src/test_loop/tests/resharding_v3.rs
+++ b/integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -50,7 +50,7 @@ const INCREASED_EPOCH_LENGTH: u64 = 8;
 const GC_NUM_EPOCHS_TO_KEEP: u64 = 3;
 
 /// Maximum number of epochs under which the test should finish.
-const TESTLOOP_NUM_EPOCHS_TO_WAIT: u64 = 10;
+const TESTLOOP_NUM_EPOCHS_TO_WAIT: u64 = 8;
 
 /// Default shard layout version used in resharding tests.
 const DEFAULT_SHARD_LAYOUT_VERSION: u64 = 2;
@@ -623,6 +623,8 @@ fn test_resharding_v3_do_not_track_children_after_resharding() {
 
 #[test]
+// TODO(resharding): Increase `TESTLOOP_NUM_EPOCHS_TO_WAIT` to 10, fix nearcore, and un-ignore this test
+#[ignore]
 fn test_resharding_v3_stop_track_child_for_2_epochs() {
     // Track parent shard before resharding, and a child shard after resharding.
     // Then do not track the child for 2 epochs and start tracking it again.
@@ -747,8 +749,6 @@ fn test_resharding_v3_double_sign_resharding_block() {
 }
 
 #[test]
-// TODO(resharding): fix nearcore and un-ignore this test
-#[ignore]
 fn test_resharding_v3_shard_shuffling() {
     let params = TestReshardingParametersBuilder::default()
         .shuffle_shard_assignment_for_chunk_producers(true)
@@ -789,8 +789,6 @@ fn test_resharding_v3_shard_shuffling_untrack_then_track() {
 }
 
 #[test]
-// TODO(resharding): fix nearcore and un-ignore this test
-#[ignore]
 fn test_resharding_v3_shard_shuffling_intense() {
     let chunk_ranges_to_drop = HashMap::from([(0, -1..2), (1, -3..0), (2, -3..3), (3, 0..1)]);
     let params = TestReshardingParametersBuilder::default()
@@ -1019,9 +1017,7 @@ fn test_resharding_v3_slower_post_processing_tasks() {
 }
 
 #[test]
-// TODO(resharding): fix nearcore and un-ignore this test, then uncomment the conditional ignore below
-#[ignore]
-//#[cfg_attr(not(feature = "test_features"), ignore)]
+#[cfg_attr(not(feature = "test_features"), ignore)]
 fn test_resharding_v3_shard_shuffling_slower_post_processing_tasks() {
     let params = TestReshardingParametersBuilder::default()
         .shuffle_shard_assignment_for_chunk_producers(true)