address review comments
staffik committed Jan 9, 2025
1 parent bac5355 commit fa8e29e
Showing 3 changed files with 63 additions and 22 deletions.
45 changes: 38 additions & 7 deletions integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -483,6 +483,15 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {

     let client = clients[client_index];
     let block_header = client.chain.get_block_header(&tip.last_block_hash).unwrap();
+    let shard_layout = client.epoch_manager.get_shard_layout(&tip.epoch_id).unwrap();
+
+    println!(
+        "new block #{} shards: {:?} chunk mask {:?} block hash {}",
+        tip.height,
+        shard_layout.shard_ids().collect_vec(),
+        block_header.chunk_mask().to_vec(),
+        tip.last_block_hash,
+    );

     // Check that all chunks are included.
     if params.all_chunks_expected && params.chunk_ranges_to_drop.is_empty() {
@@ -491,16 +500,13 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {

     trie_sanity_check.assert_state_sanity(&clients, expected_num_shards);

-    let epoch_id =
-        client.epoch_manager.get_epoch_id_from_prev_block(&tip.prev_block_hash).unwrap();
-    let epoch_info = client.epoch_manager.get_epoch_info(&epoch_id).unwrap();
-    let epoch_height = epoch_info.epoch_height();
+    let epoch_height =
+        client.epoch_manager.get_epoch_height_from_prev_block(&tip.prev_block_hash).unwrap();

     // Return false if we have not resharded yet.
     if epoch_height_after_resharding.get().is_none() {
         assert!(epoch_height < 5);
-        let epoch_config = client.epoch_manager.get_epoch_config(&epoch_id).unwrap();
-        if epoch_config.shard_layout.num_shards() != expected_num_shards {
+        if shard_layout.num_shards() != expected_num_shards {
             return false;
         }
         // Just resharded.
@@ -515,7 +521,6 @@ fn test_resharding_v3_base(params: TestReshardingParameters) {
     for client in clients {
         check_state_shard_uid_mapping_after_resharding(
             client,
-            &tip.prev_block_hash,
             &resharding_block_hash.get().unwrap(),
             parent_shard_uid,
             params.allow_negative_refcount,
@@ -590,6 +595,32 @@ fn test_resharding_v3_state_cleanup() {
     );
 }

+#[test]
+fn test_resharding_v3_do_not_track_children_after_resharding() {
+    // Track parent shard before resharding, but do not track any child shard after resharding.
+    let account_in_stable_shard: AccountId = "account0".parse().unwrap();
+    let split_boundary_account: AccountId = NEW_BOUNDARY_ACCOUNT.parse().unwrap();
+    let base_shard_layout = get_base_shard_layout(DEFAULT_SHARD_LAYOUT_VERSION);
+    let new_shard_layout =
+        ShardLayout::derive_shard_layout(&base_shard_layout, split_boundary_account.clone());
+    let parent_shard_id = base_shard_layout.account_id_to_shard_id(&split_boundary_account);
+    let unrelated_shard_id = new_shard_layout.account_id_to_shard_id(&account_in_stable_shard);
+
+    let tracked_shard_sequence =
+        vec![parent_shard_id, parent_shard_id, unrelated_shard_id, unrelated_shard_id];
+    let num_clients = 8;
+    let tracked_shard_schedule = TrackedShardSchedule {
+        client_index: (num_clients - 1) as usize,
+        schedule: shard_sequence_to_schedule(tracked_shard_sequence),
+    };
+    test_resharding_v3_base(
+        TestReshardingParametersBuilder::default()
+            .num_clients(num_clients)
+            .tracked_shard_schedule(Some(tracked_shard_schedule.clone()))
+            .build(),
+    );
+}
+
 #[test]
 fn test_resharding_v3_track_all_shards() {
     test_resharding_v3_base(
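The new test builds its `TrackedShardSchedule` by passing a per-epoch shard sequence through `shard_sequence_to_schedule`. As a rough, self-contained sketch of what that expansion could look like (the real helper lives in the test-loop utils; its exact signature and semantics are assumptions here):

```rust
// Hypothetical sketch: expand a per-epoch sequence of tracked shards into a
// schedule with one Vec<ShardId> entry per epoch. The real helper in the
// test-loop utils may differ; the types and names here are illustrative only.
type ShardId = u64;

fn shard_sequence_to_schedule(sequence: Vec<ShardId>) -> Vec<Vec<ShardId>> {
    // Track exactly one shard per epoch, in the order given by the sequence.
    sequence.into_iter().map(|shard_id| vec![shard_id]).collect()
}

fn main() {
    let (parent, unrelated) = (1, 0);
    // Track the parent shard for two epochs (up to the resharding), then move
    // to a shard unrelated to the split, so no child of the parent is tracked.
    let schedule = shard_sequence_to_schedule(vec![parent, parent, unrelated, unrelated]);
    assert_eq!(schedule, vec![vec![1], vec![1], vec![0], vec![0]]);
}
```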
17 changes: 11 additions & 6 deletions integration-tests/src/test_loop/utils/sharding.rs
@@ -123,14 +123,14 @@ pub fn get_tracked_shards_from_prev_block(
     client: &Client,
     prev_block_hash: &CryptoHash,
 ) -> Vec<ShardUId> {
-    let account_id =
-        client.validator_signer.get().map(|validator| validator.validator_id().clone());
+    let signer = client.validator_signer.get();
+    let account_id = signer.as_ref().map(|s| s.validator_id());
+    let shard_layout =
+        client.epoch_manager.get_shard_layout_from_prev_block(prev_block_hash).unwrap();
     let mut tracked_shards = vec![];
-    for shard_uid in
-        client.epoch_manager.get_shard_layout_from_prev_block(prev_block_hash).unwrap().shard_uids()
-    {
+    for shard_uid in shard_layout.shard_uids() {
         if client.shard_tracker.care_about_shard(
-            account_id.as_ref(),
+            account_id,
             prev_block_hash,
             shard_uid.shard_id(),
             true,
@@ -140,3 +140,8 @@ pub fn get_tracked_shards_from_prev_block(
     }
     tracked_shards
 }
+
+pub fn get_tracked_shards(client: &Client, block_hash: &CryptoHash) -> Vec<ShardUId> {
+    let block_header = client.chain.get_block_header(block_hash).unwrap();
+    get_tracked_shards_from_prev_block(client, block_header.prev_hash())
+}
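Beyond factoring out `shard_layout`, the rewrite drops a clone: binding the signer to a local first lets the `AccountId` be borrowed rather than cloned out of a temporary. A minimal stand-alone sketch of that borrow pattern, with toy `Signer`/`AccountId` types standing in for the real nearcore ones:

```rust
// Stand-in types to illustrate the borrow-instead-of-clone pattern; the real
// validator_signer and AccountId types in nearcore are more involved.
#[derive(Debug)]
struct AccountId(String);

struct Signer {
    account_id: AccountId,
}

impl Signer {
    fn validator_id(&self) -> &AccountId {
        &self.account_id
    }
}

fn main() {
    let maybe_signer: Option<Signer> =
        Some(Signer { account_id: AccountId("validator0".into()) });

    // Before: clone the AccountId out of the signer, because the temporary
    // returned by the getter does not live long enough to borrow from:
    //     let account_id = maybe_signer.map(|s| s.validator_id().clone());

    // After: keep the signer alive in a local binding and borrow the id from
    // it. No clone, and the Option<&AccountId> can be passed straight to a
    // care_about_shard-style API that takes an optional borrowed account id.
    let signer = maybe_signer;
    let account_id: Option<&AccountId> = signer.as_ref().map(|s| s.validator_id());
    println!("{:?}", account_id);
}
```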
23 changes: 14 additions & 9 deletions integration-tests/src/test_loop/utils/trie_sanity.rs
@@ -1,6 +1,7 @@
 use super::sharding::shard_was_split;
 use crate::test_loop::utils::sharding::{
-    client_tracking_shard, get_memtrie_for_shard, get_tracked_shards_from_prev_block,
+    client_tracking_shard, get_memtrie_for_shard, get_tracked_shards,
+    get_tracked_shards_from_prev_block,
 };
 use borsh::BorshDeserialize;
 use itertools::Itertools;
@@ -343,20 +344,19 @@ fn should_assert_state_sanity(
 /// Asserts that all parent shard State is accessible via parent and children shards.
 pub fn check_state_shard_uid_mapping_after_resharding(
     client: &Client,
-    prev_block_hash: &CryptoHash,
     resharding_block_hash: &CryptoHash,
     parent_shard_uid: ShardUId,
     allow_negative_refcount: bool,
 ) {
     let tip = client.chain.head().unwrap();
     let epoch_id = tip.epoch_id;
-    let epoch_config = client.epoch_manager.get_epoch_config(&epoch_id).unwrap();
+    let shard_layout = client.epoch_manager.get_shard_layout(&epoch_id).unwrap();
     let children_shard_uids =
-        epoch_config.shard_layout.get_children_shards_uids(parent_shard_uid.shard_id()).unwrap();
+        shard_layout.get_children_shards_uids(parent_shard_uid.shard_id()).unwrap();
     assert_eq!(children_shard_uids.len(), 2);

     // Currently tracked shards.
-    let tracked_shards = get_tracked_shards_from_prev_block(client, prev_block_hash);
+    let tracked_shards = get_tracked_shards_from_prev_block(client, &tip.prev_block_hash);
     // ShardUId mappings (different from the identity mapping) that we have stored in the DB.
     let mut shard_uid_mapping = HashMap::new();
     // Currently tracked children shards that are mapped to an ancestor.
@@ -410,22 +410,27 @@ pub fn check_state_shard_uid_mapping_after_resharding(
     }
     assert!(checked_any_key);

+    let shards_tracked_before_resharding = get_tracked_shards(client, resharding_block_hash);
+    let tracked_parent_before_resharding =
+        shards_tracked_before_resharding.contains(&parent_shard_uid);
     let shards_tracked_after_resharding =
         get_tracked_shards_from_prev_block(client, resharding_block_hash);
+
     // Sanity checks if the node tracks all shards (e.g. it is an RPC node).
     if !client.config.tracked_shards.is_empty() {
         assert_eq!(tracked_mapped_children.len(), 2);
-        assert_eq!(
-            shards_tracked_after_resharding.len(),
-            epoch_config.shard_layout.num_shards() as usize
-        );
+        assert_eq!(shards_tracked_after_resharding.len(), shard_layout.num_shards() as usize);
     }
     // If any child shard was tracked after resharding, it means the node had to split the parent shard.
     if children_shard_uids
         .iter()
         .any(|child_shard_uid| shards_tracked_after_resharding.contains(child_shard_uid))
     {
         assert_eq!(shard_uid_mapping.len(), 2);
+    } else if tracked_parent_before_resharding {
+        // Parent was tracked before resharding, but no child was tracked after resharding.
+        // TODO(resharding): consider not resharding in such a case. If fixed, the assert below should change from 2 to 0.
+        assert_eq!(shard_uid_mapping.len(), 2);
     } else {
         // Otherwise, no mapping was set and no shard State would be mapped.
         assert!(shard_uid_mapping.is_empty());
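The three assertion branches encode a small decision table: two `ShardUId` mappings whenever the node had to split the parent (it tracks a child after the resharding, or tracked the parent going into it), and zero otherwise. A toy model of that table, with plain `u64` ids standing in for `ShardUId` (illustrative only, not nearcore code):

```rust
// Toy model of the expected number of ShardUId mappings after a resharding,
// mirroring the assertion branches in check_state_shard_uid_mapping_after_resharding.
fn expected_mappings(
    children: [u64; 2],
    tracked_parent_before: bool,
    tracked_after: &[u64],
) -> usize {
    if children.iter().any(|c| tracked_after.contains(c)) {
        // The node tracks a child, so it had to split the parent: both
        // children are mapped to the parent's ShardUId.
        2
    } else if tracked_parent_before {
        // Parent tracked before, no child tracked after: per the TODO in the
        // diff, the split still happens today, so both mappings exist.
        2
    } else {
        // The node never needed the parent's state: no mapping at all.
        0
    }
}

fn main() {
    let children = [2, 3];
    assert_eq!(expected_mappings(children, true, &[2]), 2); // tracks a child
    assert_eq!(expected_mappings(children, true, &[0]), 2); // parent only (TODO case)
    assert_eq!(expected_mappings(children, false, &[0]), 0); // unrelated shard only
}
```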
