Skip to content

Commit

Permalink
test: produce chunks in resharding v3 test (#12196)
Browse files Browse the repository at this point in the history
Improve the test by checking that all chunks are included, and the chain
passes through one epoch after resharding.

For that, I need to properly skip chunk validation if resharding
happened in the middle, as this is not part of the early MVP. One more
useful and necessary change: when iterating over the block range of a state
transition, I need to move from shard_id to prev_shard_id, so that we
query the proper chunk in each block.

I checked offline that, without the validation skip, chunks for shards 1-2 are
not included.
  • Loading branch information
Longarithm authored Oct 11, 2024
1 parent 189222e commit 0febdb5
Show file tree
Hide file tree
Showing 4 changed files with 140 additions and 53 deletions.
5 changes: 4 additions & 1 deletion chain/chain/src/resharding/manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,10 @@ impl ReshardingManager {
let block_hash = block.hash();
let block_height = block.header().height();
let prev_hash = block.header().prev_hash();
if !self.epoch_manager.will_shard_layout_change(prev_hash)? {
let next_block_has_new_shard_layout =
self.epoch_manager.will_shard_layout_change(prev_hash)?
&& self.epoch_manager.is_next_block_epoch_start(block.hash())?;
if !next_block_has_new_shard_layout {
return Ok(());
}

Expand Down
157 changes: 110 additions & 47 deletions chain/chain/src/stateless_validation/chunk_validation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use crate::types::{
RuntimeStorageConfig, StorageDataSource,
};
use crate::validate::validate_chunk_with_chunk_extra_and_receipts_root;
use crate::{Chain, ChainStoreAccess};
use crate::{Chain, ChainStore, ChainStoreAccess};
use lru::LruCache;
use near_async::futures::AsyncComputationSpawnerExt;
use near_chain_primitives::Error;
Expand All @@ -28,7 +28,7 @@ use near_primitives::stateless_validation::state_witness::{
};
use near_primitives::transaction::SignedTransaction;
use near_primitives::types::chunk_extra::ChunkExtra;
use near_primitives::types::{ProtocolVersion, ShardId};
use near_primitives::types::{ProtocolVersion, ShardId, ShardIndex};
use near_primitives::utils::compression::CompressedData;
use near_store::PartialStorage;
use std::collections::HashMap;
Expand All @@ -40,13 +40,20 @@ use std::time::Instant;
/// The main state transition covered by a chunk state witness, i.e. the
/// transition that chunk validators must re-execute (or explicitly skip).
pub enum MainTransition {
    /// The shard's state starts at genesis: there is no previous chunk to
    /// apply, so the precomputed genesis `ChunkExtra` is used directly.
    Genesis { chunk_extra: ChunkExtra, block_hash: CryptoHash, shard_id: ShardId },
    /// A new chunk to apply, carrying everything needed to re-execute it.
    NewChunk(NewChunkData),
    // TODO(#11881): this is a temporary indicator that resharding happened in
    // the state transition covered by the state witness. Won't happen in
    // production until the resharding release.
    // Instead, we can store a separate field `resharding_transition` in
    // `ChunkStateWitness` and use it for proper validation of this case.
    ShardLayoutChange,
}

impl MainTransition {
pub fn block_hash(&self) -> CryptoHash {
match self {
Self::Genesis { block_hash, .. } => *block_hash,
Self::NewChunk(data) => data.block.block_hash,
Self::ShardLayoutChange => panic!("block_hash called on ShardLayoutChange"),
}
}

Expand All @@ -56,6 +63,7 @@ impl MainTransition {
// It is ok to use the shard id from the header because it is a new
// chunk. An old chunk may have the shard id from the parent shard.
Self::NewChunk(data) => data.chunk_header.shard_id(),
Self::ShardLayoutChange => panic!("shard_id called on ShardLayoutChange"),
}
}
}
Expand Down Expand Up @@ -104,6 +112,74 @@ pub fn validate_prepared_transactions(
)
}

/// The range of blocks, walked backwards from the witness chunk's previous
/// block, that is relevant for validating a chunk state witness.
struct StateWitnessBlockRange {
    /// Blocks from the last new chunk (exclusive) to the parent block
    /// (inclusive).
    blocks_after_last_chunk: Vec<Block>,
    /// Blocks from the last last new chunk (exclusive) to the last new chunk
    /// (inclusive).
    blocks_after_last_last_chunk: Vec<Block>,
    // Shard id to use when querying the last new chunk in its block; obtained
    // via `get_prev_shard_id`, so it may differ from the witness chunk's own
    // shard id if resharding happened in between.
    last_chunk_shard_id: ShardId,
    // Index of that shard within its block's chunk list, valid for the shard
    // layout in effect at the last new chunk's block.
    last_chunk_shard_index: ShardIndex,
}

/// Walks the chain backwards from the witness chunk's previous block to find
/// the last new chunk and the last-last new chunk for the witness shard,
/// collecting the blocks in between (see `StateWitnessBlockRange`).
///
/// While walking, the shard id/index are re-mapped through
/// `get_prev_shard_id` at every step so that, if resharding happened inside
/// the range, chunks are looked up under the parent shard's id in older
/// blocks.
///
/// Errors with `InvalidChunkStateWitness` if the tracked shard index does not
/// exist in some visited block; store/epoch-manager errors are propagated.
fn get_state_witness_block_range(
    store: &ChainStore,
    epoch_manager: &dyn EpochManagerAdapter,
    state_witness: &ChunkStateWitness,
) -> Result<StateWitnessBlockRange, Error> {
    let mut blocks_after_last_chunk = Vec::new();
    let mut blocks_after_last_last_chunk = Vec::new();

    // Start from the block the witness chunk builds on.
    let mut block_hash = *state_witness.chunk_header.prev_block_hash();
    // Number of new (non-missing) chunks for this shard seen so far.
    let mut prev_chunks_seen = 0;

    // It is ok to use the shard id from the header because it is a new
    // chunk. An old chunk may have the shard id from the parent shard.
    let (mut current_shard_id, mut current_shard_index) =
        epoch_manager.get_prev_shard_id(&block_hash, state_witness.chunk_header.shard_id())?;

    let mut last_chunk_shard_id = current_shard_id;
    let mut last_chunk_shard_index = current_shard_index;
    loop {
        let block = store.get_block(&block_hash)?;
        let prev_hash = *block.header().prev_hash();
        let chunks = block.chunks();
        let Some(chunk) = chunks.get(current_shard_index) else {
            return Err(Error::InvalidChunkStateWitness(format!(
                "Shard {} does not exist in block {:?}",
                current_shard_id, block_hash
            )));
        };
        let is_new_chunk = chunk.is_new_chunk(block.header().height());
        let is_genesis = block.header().is_genesis();
        if is_new_chunk {
            prev_chunks_seen += 1;
        }
        if prev_chunks_seen == 0 {
            // Still above the last new chunk: remember the shard id/index
            // valid here and collect the block.
            // NOTE(review): these are captured only while prev_chunks_seen
            // == 0, i.e. they reflect the layout of the block *after* the
            // last new chunk's block — presumably the two agree unless the
            // witness is a ShardLayoutChange case; confirm against callers.
            last_chunk_shard_id = current_shard_id;
            last_chunk_shard_index = current_shard_index;
            blocks_after_last_chunk.push(block);
        } else if prev_chunks_seen == 1 {
            // Between the last and last-last new chunks (inclusive of the
            // last new chunk's block).
            blocks_after_last_last_chunk.push(block);
        }
        if prev_chunks_seen == 2 || is_genesis {
            break;
        }

        // Step to the parent block and re-map the shard id/index for it,
        // so lookups stay correct across a resharding boundary.
        block_hash = prev_hash;
        (current_shard_id, current_shard_index) =
            epoch_manager.get_prev_shard_id(&prev_hash, current_shard_id)?;
    }

    Ok(StateWitnessBlockRange {
        blocks_after_last_chunk,
        blocks_after_last_last_chunk,
        last_chunk_shard_id,
        last_chunk_shard_index,
    })
}

/// Pre-validates the chunk's receipts and transactions against the chain.
/// We do this before handing off the computationally intensive part to a
/// validation thread.
Expand All @@ -114,55 +190,34 @@ pub fn pre_validate_chunk_state_witness(
runtime_adapter: &dyn RuntimeAdapter,
) -> Result<PreValidationOutput, Error> {
let store = chain.chain_store();
let epoch_id = state_witness.epoch_id;
let shard_layout = epoch_manager.get_shard_layout(&epoch_id)?;

// It is ok to use the shard id from the header because it is a new
// chunk. An old chunk may have the shard id from the parent shard.
let shard_id = state_witness.chunk_header.shard_id();
let shard_index = shard_layout.get_shard_index(shard_id);

// First, go back through the blockchain history to locate the last new chunk
// and last last new chunk for the shard.
let StateWitnessBlockRange {
blocks_after_last_chunk,
blocks_after_last_last_chunk,
last_chunk_shard_id,
last_chunk_shard_index,
} = get_state_witness_block_range(store, epoch_manager, state_witness)?;

// Blocks from the last new chunk (exclusive) to the parent block (inclusive).
let mut blocks_after_last_chunk = Vec::new();
// Blocks from the last last new chunk (exclusive) to the last new chunk (inclusive).
let mut blocks_after_last_last_chunk = Vec::new();

{
let mut block_hash = *state_witness.chunk_header.prev_block_hash();
let mut prev_chunks_seen = 0;
loop {
let block = store.get_block(&block_hash)?;
let chunks = block.chunks();
let Some(chunk) = chunks.get(shard_index) else {
return Err(Error::InvalidChunkStateWitness(format!(
"Shard {} does not exist in block {:?}",
shard_id, block_hash
)));
};
let is_new_chunk = chunk.is_new_chunk(block.header().height());
let is_genesis = block.header().is_genesis();
block_hash = *block.header().prev_hash();
if is_new_chunk {
prev_chunks_seen += 1;
}
if prev_chunks_seen == 0 {
blocks_after_last_chunk.push(block);
} else if prev_chunks_seen == 1 {
blocks_after_last_last_chunk.push(block);
}
if prev_chunks_seen == 2 || is_genesis {
break;
}
}
let last_chunk_block = blocks_after_last_last_chunk.first().ok_or_else(|| {
Error::Other("blocks_after_last_last_chunk is empty, this should be impossible!".into())
})?;
let last_chunk_shard_layout =
epoch_manager.get_shard_layout(&last_chunk_block.header().epoch_id())?;
let chunk_shard_layout = epoch_manager
.get_shard_layout_from_prev_block(state_witness.chunk_header.prev_block_hash())?;
if last_chunk_shard_layout != chunk_shard_layout {
return Ok(PreValidationOutput {
main_transition_params: MainTransition::ShardLayoutChange,
implicit_transition_params: Vec::new(),
});
}

let receipts_to_apply = validate_source_receipt_proofs(
&state_witness.source_receipt_proofs,
&blocks_after_last_last_chunk,
shard_id,
last_chunk_shard_id,
)?;
let applied_receipts_hash = hash(&borsh::to_vec(receipts_to_apply.as_slice()).unwrap());
if applied_receipts_hash != state_witness.applied_receipts_hash {
Expand All @@ -175,7 +230,8 @@ pub fn pre_validate_chunk_state_witness(
let last_chunk_block = blocks_after_last_last_chunk.first().ok_or_else(|| {
Error::Other("blocks_after_last_last_chunk is empty, this should be impossible!".into())
})?;
let last_new_chunk_tx_root = last_chunk_block.chunks().get(shard_index).unwrap().tx_root();
let last_new_chunk_tx_root =
last_chunk_block.chunks().get(last_chunk_shard_index).unwrap().tx_root();
if last_new_chunk_tx_root != tx_root_from_state_witness {
return Err(Error::InvalidChunkStateWitness(format!(
"Transaction root {:?} does not match expected transaction root {:?}",
Expand Down Expand Up @@ -226,19 +282,23 @@ pub fn pre_validate_chunk_state_witness(
let shard_layout = epoch_manager.get_shard_layout(&epoch_id)?;
let congestion_info = last_chunk_block
.block_congestion_info()
.get(&shard_id)
.get(&last_chunk_shard_id)
.map(|info| info.congestion_info);
let genesis_protocol_version = epoch_manager.get_epoch_protocol_version(&epoch_id)?;
let chunk_extra = chain.genesis_chunk_extra(
&shard_layout,
shard_id,
last_chunk_shard_id,
genesis_protocol_version,
congestion_info,
)?;
MainTransition::Genesis { chunk_extra, block_hash: *last_chunk_block.hash(), shard_id }
MainTransition::Genesis {
chunk_extra,
block_hash: *last_chunk_block.hash(),
shard_id: last_chunk_shard_id,
}
} else {
MainTransition::NewChunk(NewChunkData {
chunk_header: last_chunk_block.chunks().get(shard_index).unwrap().clone(),
chunk_header: last_chunk_block.chunks().get(last_chunk_shard_index).unwrap().clone(),
transactions: state_witness.transactions.clone(),
receipts: receipts_to_apply,
block: Chain::get_apply_chunk_block_context(
Expand Down Expand Up @@ -423,6 +483,9 @@ pub fn validate_chunk_state_witness(

(chunk_extra, outgoing_receipts)
}
(MainTransition::ShardLayoutChange, _) => {
panic!("shard layout change should not be validated")
}
(_, Some(result)) => (result.chunk_extra, result.outgoing_receipts),
};
if chunk_extra.state_root() != &state_witness.main_state_transition.post_state_root {
Expand Down
13 changes: 13 additions & 0 deletions chain/client/src/stateless_validation/chunk_validator/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,19 @@ impl ChunkValidator {
let chunk_header = state_witness.chunk_header.clone();
let network_sender = self.network_sender.clone();
let epoch_manager = self.epoch_manager.clone();
if matches!(
pre_validation_result.main_transition_params,
chunk_validation::MainTransition::ShardLayoutChange
) {
send_chunk_endorsement_to_block_producers(
&chunk_header,
epoch_manager.as_ref(),
signer,
&network_sender,
);
return Ok(());
}

// If we have the chunk extra for the previous block, we can validate the chunk without state witness.
// This usually happens because we are a chunk producer and
// therefore have the chunk extra for the previous block saved on disk.
Expand Down
18 changes: 13 additions & 5 deletions integration-tests/src/test_loop/tests/resharding_v3.rs
Original file line number Diff line number Diff line change
Expand Up @@ -101,17 +101,25 @@ fn test_resharding_v3() {
let success_condition = |test_loop_data: &mut TestLoopData| -> bool {
let client = &test_loop_data.get(&client_handle).client;
let tip = client.chain.head().unwrap();

// Check that all chunks are included.
let block_header = client.chain.get_block_header(&tip.last_block_hash).unwrap();
assert!(block_header.chunk_mask().iter().all(|chunk_bit| *chunk_bit));

// Return true if we passed an epoch with increased number of shards.
let epoch_height =
client.epoch_manager.get_epoch_height_from_prev_block(&tip.prev_block_hash).unwrap();
assert!(epoch_height < 5);
let epoch_config = client.epoch_manager.get_epoch_config(&tip.epoch_id).unwrap();
return epoch_config.shard_layout.shard_ids().count() == expected_num_shards;
assert!(epoch_height < 6);
let prev_epoch_id =
client.epoch_manager.get_prev_epoch_id_from_prev_block(&tip.prev_block_hash).unwrap();
let epoch_config = client.epoch_manager.get_epoch_config(&prev_epoch_id).unwrap();
epoch_config.shard_layout.shard_ids().count() == expected_num_shards
};

test_loop.run_until(
success_condition,
// Give enough time to produce ~6 epochs.
Duration::seconds((6 * epoch_length) as i64),
// Give enough time to produce ~7 epochs.
Duration::seconds((7 * epoch_length) as i64),
);

TestLoopEnv { test_loop, datas: node_datas, tempdir }
Expand Down

0 comments on commit 0febdb5

Please sign in to comment.