diff --git a/Cargo.lock b/Cargo.lock
index e2f38661a..e1889f0db 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -284,12 +284,13 @@ dependencies = [
 [[package]]
 name = "cc"
-version = "1.0.94"
+version = "1.0.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7"
+checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b"
 dependencies = [
  "jobserver",
  "libc",
+ "once_cell",
 ]
@@ -603,7 +604,6 @@ dependencies = [
 [[package]]
 name = "everscale-types"
 version = "0.1.0-rc.6"
-source = "git+https://github.com/broxus/everscale-types.git#40f2cd862ede93943a254351fe4eea313be83233"
 dependencies = [
  "ahash",
  "base64 0.21.7",
@@ -623,7 +623,6 @@ dependencies = [
 [[package]]
 name = "everscale-types-proc"
 version = "0.1.4"
-source = "git+https://github.com/broxus/everscale-types.git#40f2cd862ede93943a254351fe4eea313be83233"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -657,9 +656,9 @@ dependencies = [
 [[package]]
 name = "fiat-crypto"
-version = "0.2.7"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f"
+checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e"

 [[package]]
 name = "futures-core"
@@ -795,9 +794,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"

 [[package]]
 name = "jobserver"
-version = "0.1.30"
+version = "0.1.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2"
+checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e"
 dependencies = [
  "libc",
 ]
@@ -1517,9 +1516,9 @@ dependencies = [

 [[package]]
 name = "rustix"
-version = "0.38.32"
+version = "0.38.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89"
+checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f"
 dependencies = [
  "bitflags 2.5.0",
  "errno",
@@ -1530,9 +1529,9 @@ dependencies = [

 [[package]]
 name = "rustls"
-version = "0.21.10"
+version = "0.21.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
+checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4"
 dependencies = [
  "log",
  "ring 0.17.8",
@@ -1654,9 +1653,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

 [[package]]
 name = "signal-hook-registry"
-version = "1.4.1"
+version = "1.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
+checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
 dependencies = [
  "libc",
 ]
@@ -1804,18 +1803,18 @@ dependencies = [

 [[package]]
 name = "thiserror"
-version = "1.0.58"
+version = "1.0.59"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
+checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa"
 dependencies = [
  "thiserror-impl",
 ]

 [[package]]
 name = "thiserror-impl"
-version = "1.0.58"
+version = "1.0.59"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
+checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2133,10 +2132,14 @@ version = "0.0.1"
 dependencies = [
  "anyhow",
  "async-trait",
+ "bytesize",
  "everscale-crypto",
  "everscale-types",
  "futures-util",
+ "log",
  "rand",
+ "sha2",
+ "tempfile",
  "tl-proto",
  "tokio",
  "tracing",
diff --git a/Cargo.toml b/Cargo.toml
index 1c8bcf22a..c1de24264 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -38,7 +38,7 @@ crc = "3.0.1"
 dashmap = "5.4"
 ed25519 = "2.0"
 everscale-crypto = { version = "0.2", features = ["tl-proto", "serde"] }
-everscale-types = "0.1.0-rc.6"
+everscale-types = { version = "0.1.0-rc.6", features = ["tycho"] }
 exponential-backoff = "1"
 fdlimit = "0.3.0"
 futures-util = "0.3"
@@ -99,7 +99,8 @@ tycho-util = { path = "./util" }
 # NOTE: use crates.io dependency when it is released
 # https://github.com/sagebind/castaway/issues/18
 castaway = { git = "https://github.com/sagebind/castaway.git" }
-everscale-types = { git = "https://github.com/broxus/everscale-types.git" }
+#everscale-types = { git = "https://github.com/broxus/everscale-types.git", branch = "tycho" }
+everscale-types = { path = "../everscale-types" }

 [workspace.lints.rust]
 future_incompatible = "warn"
diff --git a/block-util/Cargo.toml b/block-util/Cargo.toml
index 3631c29a2..0343ad964 100644
--- a/block-util/Cargo.toml
+++ b/block-util/Cargo.toml
@@ -25,5 +25,8 @@ tycho-util = { workspace = true }
 [dev-dependencies]
 rand = { workspace = true }

+[features]
+test = []
+
 [lints]
 workspace = true
diff --git a/block-util/src/block/block_stuff.rs b/block-util/src/block/block_stuff.rs
index 6acbed2a9..e02c6c437 100644
--- a/block-util/src/block/block_stuff.rs
+++ b/block-util/src/block/block_stuff.rs
@@ -18,6 +18,39 @@ pub struct BlockStuff {
 }

 impl BlockStuff {
+    #[cfg(any(test, feature = "test"))]
+    pub fn new_empty(shard: ShardIdent, seqno: u32) -> Self {
+        use everscale_types::merkle::MerkleUpdate;
+
+        let block_info = BlockInfo {
+            shard,
+            seqno,
+            ..Default::default()
+        };
+
+        let block = Block {
+            global_id: 0,
+            info: Lazy::new(&block_info).unwrap(),
+            value_flow: Lazy::new(&ValueFlow::default()).unwrap(),
+            state_update: Lazy::new(&MerkleUpdate::default()).unwrap(),
+            out_msg_queue_updates: None,
+            extra: Lazy::new(&BlockExtra::default()).unwrap(),
+        };
+
+        let cell = CellBuilder::build_from(&block).unwrap();
+        let root_hash = *cell.repr_hash();
+        let file_hash = sha2::Sha256::digest(Boc::encode(&cell)).into();
+
+        let block_id = BlockId {
+            shard: block_info.shard,
+            seqno: block_info.seqno,
+            root_hash,
+            file_hash,
+        };
+
+        Self::with_block(block_id, block)
+    }
+
     pub fn with_block(id: BlockId, block: Block) -> Self {
         Self {
             inner: Arc::new(Inner { id, block }),
diff --git a/block-util/src/config/mod.rs b/block-util/src/config/mod.rs
new file mode 100644
index 000000000..36aec4d3b
--- /dev/null
+++ b/block-util/src/config/mod.rs
@@ -0,0 +1,24 @@
+use anyhow::Result;
+
+use everscale_types::{dict::Dict, models::BlockchainConfig};
+
+pub trait BlockchainConfigExt {
+    /// Check that config is valid.
+    fn validate_params(
+        &self,
+        relax_par0: bool,
+        mandatory_params: Option<Dict<u32, ()>>,
+    ) -> Result<bool>;
+}
+
+impl BlockchainConfigExt for BlockchainConfig {
+    fn validate_params(
+        &self,
+        _relax_par0: bool,
+        _mandatory_params: Option<Dict<u32, ()>>,
+    ) -> Result<bool> {
+        //TODO: refer to https://github.com/everx-labs/ever-block/blob/master/src/config_params.rs#L452
+        //STUB: currently should not be invoked in prototype
+        todo!()
+    }
+}
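
For context, a minimal sketch of the mandatory-params check that `validate_params` will eventually need (a hypothetical helper; it assumes the raw config params are reachable as a `Dict<u32, Cell>`, which this patch does not show):

```rust
use anyhow::Result;
use everscale_types::{cell::Cell, dict::Dict};

/// Returns false if any mandatory param id is missing from the raw config dict.
fn has_all_mandatory(params: &Dict<u32, Cell>, mandatory: &Dict<u32, ()>) -> Result<bool> {
    for entry in mandatory.iter() {
        let (param_id, _) = entry?;
        if params.get(param_id)?.is_none() {
            return Ok(false);
        }
    }
    Ok(true)
}
```
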
diff --git a/block-util/src/lib.rs b/block-util/src/lib.rs
index 76db95c9f..3fbbd97cf 100644
--- a/block-util/src/lib.rs
+++ b/block-util/src/lib.rs
@@ -1,3 +1,4 @@
 pub mod archive;
 pub mod block;
+pub mod config;
 pub mod state;
diff --git a/cli/src/tools/gen_zerostate.rs b/cli/src/tools/gen_zerostate.rs
index 2e98af1d9..cb38490a9 100644
--- a/cli/src/tools/gen_zerostate.rs
+++ b/cli/src/tools/gen_zerostate.rs
@@ -343,21 +343,9 @@ fn make_shard_state(global_id: i32, shard_ident: ShardIdent, now: u32) -> ShardS
     ShardStateUnsplit {
         global_id,
         shard_ident,
-        seqno: 0,
-        vert_seqno: 0,
         gen_utime: now,
-        gen_lt: 0,
         min_ref_mc_seqno: u32::MAX,
-        out_msg_queue_info: Default::default(),
-        before_split: false,
-        accounts: Lazy::new(&Default::default()).unwrap(),
-        overload_history: 0,
-        underload_history: 0,
-        total_balance: CurrencyCollection::ZERO,
-        total_validator_fees: CurrencyCollection::ZERO,
-        libraries: Dict::new(),
-        master_ref: None,
-        custom: None,
+        ..Default::default()
     }
 }
diff --git a/collator/Cargo.toml b/collator/Cargo.toml
index 0a605596a..84d9fdb7b 100644
--- a/collator/Cargo.toml
+++ b/collator/Cargo.toml
@@ -12,9 +12,12 @@ license.workspace = true
 # crates.io deps
 anyhow = { workspace = true }
 async-trait = { workspace = true }
+bytesize = { workspace = true }
 futures-util = { workspace = true }
 rand = { workspace = true }
+sha2 = { workspace = true }
 tl-proto = { workspace = true }
+tempfile = { workspace = true }
 tokio = { workspace = true, features = ["macros", "rt", "signal"] }
 tracing = { workspace = true }
 tracing-subscriber = { workspace = true }
@@ -29,9 +32,19 @@ tycho-network = { workspace = true }
 tycho-storage = { workspace = true }
 tycho-util = { workspace = true }
 tycho-block-util = { workspace = true }
+log = "0.4.21"

 [dev-dependencies]
+tempfile = { workspace = true }
+tokio = { version = "1", features = ["rt-multi-thread"] }
 tracing-test = { workspace = true }
+tycho-block-util = { workspace = true, features = ["test"] }
+tycho-core = { workspace = true, features = ["test"] }
+tycho-storage = { workspace = true, features = ["test"] }
+tycho-util = { workspace = true, features = ["test"] }
+
+[features]
+test = []

 [lints]
 workspace = true
diff --git a/collator/src/collator/build_block.rs b/collator/src/collator/build_block.rs
new file mode 100644
index 000000000..b180368f6
--- /dev/null
+++ b/collator/src/collator/build_block.rs
@@ -0,0 +1,452 @@
+use std::collections::HashMap;
+
+use anyhow::{bail, Result};
+
+use everscale_types::models::{BlockExtra, BlockInfo, ShardStateUnsplit};
+use everscale_types::{
+    cell::{Cell, CellBuilder, HashBytes, UsageTree},
+    dict::Dict,
+    merkle::MerkleUpdate,
+    models::{
+        Block, BlockId, BlockRef, BlockchainConfig, CreatorStats, GlobalCapability, GlobalVersion,
+        Lazy, LibDescr, McBlockExtra, McStateExtra, ShardHashes, WorkchainDescription,
+    },
+};
+use sha2::Digest;
+use tycho_block_util::config::BlockchainConfigExt;
+use tycho_block_util::state::ShardStateStuff;
+
+use crate::{
+    mempool::MempoolAdapter, msg_queue::MessageQueueAdapter, state_node::StateNodeAdapter,
+    types::BlockCandidate,
+};
+
+use super::super::types::{AccountBlocksDict, BlockCollationData, PrevData, ShardAccountStuff};
+
+use super::{execution_manager::ExecutionManager, CollatorProcessorStdImpl};
+
+impl<MQ, MP, ST> CollatorProcessorStdImpl<MQ, MP, ST>
+where
+    MQ: MessageQueueAdapter,
+    MP: MempoolAdapter,
+    ST: StateNodeAdapter,
+{
+    pub(super) async fn finalize_block(
+        &mut self,
+        collation_data: &mut BlockCollationData,
+        mut exec_manager: ExecutionManager,
+    ) -> Result<(BlockCandidate, ShardStateStuff)> {
+        let mc_data = &self.working_state().mc_data;
+        let prev_shard_data = &self.working_state().prev_shard_data;
+
+        // update shard accounts tree and prepare accounts blocks
+        let shard_accounts = prev_shard_data.observable_accounts().clone();
+        let account_blocks = AccountBlocksDict::default();
+        let mut changed_accounts = HashMap::new();
+
+        let new_config_opt: Option<BlockchainConfig> = None;
+
+        for (account_id, (sender, handle)) in exec_manager.changed_accounts.drain() {
+            // drop sender to stop the task that processes messages and force it to return the updated shard account
+            std::mem::drop(sender);
+            let shard_acc_stuff = handle.await??;
+            //TODO: read account
+            //TODO: get updated blockchain config if it is stored in the account
+            //TODO: if have transactions, build AccountBlock and add to account_blocks
+            changed_accounts.insert(account_id, shard_acc_stuff);
+        }
+
+        //TODO: update new_config_opt from hard fork
+
+        // calc value flow
+        //TODO: init collation_data.value_flow
+        let mut value_flow = collation_data.value_flow.clone();
+        //TODO: init collation_data.in_msgs
+        value_flow.imported = collation_data.in_msgs.root_extra().value_imported.clone();
+        //TODO: init collation_data.out_msgs
+        value_flow.exported = collation_data.out_msgs.root_extra().clone();
+        value_flow.fees_collected = account_blocks.root_extra().clone();
+        value_flow
+            .fees_collected
+            .try_add_assign_tokens(collation_data.in_msgs.root_extra().fees_collected)?;
+        value_flow
+            .fees_collected
+            .try_add_assign(&value_flow.fees_imported)?;
+        value_flow
+            .fees_collected
+            .try_add_assign(&value_flow.created)?;
+        value_flow.to_next_block = shard_accounts.root_extra().balance.clone();
+
+        // build master state extra or get a ref to last applied master block
+        //TODO: extract min_ref_mc_seqno from processed_upto info when we have many shards
+        let (out_msg_queue_info, _min_ref_mc_seqno) =
+            collation_data.out_msg_queue_stuff.get_out_msg_queue_info();
+        //collation_data.update_ref_min_mc_seqno(min_ref_mc_seqno);
+        let (mc_state_extra, master_ref) = if self.shard_id.is_masterchain() {
+            let (extra, min_ref_mc_seqno) =
+                self.create_mc_state_extra(collation_data, new_config_opt)?;
+            collation_data.update_ref_min_mc_seqno(min_ref_mc_seqno);
+            (Some(extra), None)
+        } else {
+            (None, Some(mc_data.get_master_ref()))
+        };
+
+        // build block info
+        let mut new_block_info = BlockInfo {
+            version: 0,
+            ..Default::default()
+        };
+        new_block_info.set_prev_ref(&prev_shard_data.get_blocks_ref()?);
+
+        //TODO: should be set when split/merge logic is implemented
+        // info.after_merge = false;
+        // info.before_split = false;
+        // info.after_split = false;
+        // info.want_split = false;
+        // info.want_merge = false;
+
+        if matches!(mc_state_extra, Some(ref extra) if extra.after_key_block) {
+            new_block_info.key_block = true;
+        }
+        new_block_info.shard = collation_data.block_id_short.shard;
+        new_block_info.seqno = collation_data.block_id_short.seqno;
+        new_block_info.gen_utime = collation_data.chain_time;
+        new_block_info.start_lt = collation_data.start_lt;
+        new_block_info.end_lt = collation_data.max_lt + 1;
+        new_block_info.gen_validator_list_hash_short =
+            self.collation_session.collators().short_hash;
+        new_block_info.gen_catchain_seqno = self.collation_session.seqno();
+        new_block_info.min_ref_mc_seqno = collation_data.min_ref_mc_seqno()?;
+        new_block_info.prev_key_block_seqno = mc_data.prev_key_block_seqno();
+        new_block_info.master_ref = master_ref.as_ref().map(Lazy::new).transpose()?;
+        let global_version = mc_data.config().get_global_version()?;
+        if global_version
+            .capabilities
+            .contains(GlobalCapability::CapReportVersion)
+        {
+            new_block_info.set_gen_software(Some(GlobalVersion {
+                version: self.config.supported_block_version,
+                capabilities: self.config.supported_capabilities.into(),
+            }));
+        }
+
+        // build new state
+        let global_id = prev_shard_data.observable_states()[0].state().global_id;
+        let mut new_state = ShardStateUnsplit {
+            global_id,
+            shard_ident: new_block_info.shard,
+            seqno: new_block_info.seqno,
+            vert_seqno: 0,
+            gen_utime: new_block_info.gen_utime,
+            #[cfg(feature = "venom")]
+            gen_utime_ms: new_block_info.gen_utime_ms,
+            gen_lt: new_block_info.end_lt,
+            min_ref_mc_seqno: new_block_info.min_ref_mc_seqno,
+            out_msg_queue_info: Lazy::new(&out_msg_queue_info)?,
+            // TODO: Check if total fits into 4 refs
+            externals_processed_upto: collation_data.externals_processed_upto.clone(),
+            before_split: new_block_info.before_split,
+            accounts: Lazy::new(&shard_accounts)?,
+            overload_history: 0,
+            underload_history: 0,
+            total_balance: value_flow.to_next_block.clone(),
+            total_validator_fees: prev_shard_data.total_validator_fees().clone(),
+            libraries: Dict::new(),
+            master_ref,
+            custom: mc_state_extra.as_ref().map(Lazy::new).transpose()?,
+            #[cfg(feature = "venom")]
+            shard_block_refs: None,
+        };
+
+        new_state
+            .total_balance
+            .try_add_assign(&value_flow.fees_collected)?;
+
+        new_state
+            .total_validator_fees
+            .checked_sub(&value_flow.recovered)?;
+
+        if self.shard_id.is_masterchain() {
+            new_state.libraries =
+                self.update_public_libraries(exec_manager.libraries.clone(), &changed_accounts)?;
+        }
+
+        //TODO: update smc on hard fork
+
+        // calc merkle update
+        let new_state_root = CellBuilder::build_from(&new_state)?;
+        let state_update = Self::create_merkle_update(
+            &self.collator_descr,
+            prev_shard_data,
+            &new_state_root,
+            &self.working_state().usage_tree,
+        )?;
+
+        // calc block extra
+        let mut new_block_extra = BlockExtra {
+            in_msg_description: Lazy::new(&collation_data.in_msgs)?,
+            out_msg_description: Lazy::new(&collation_data.out_msgs)?,
+            account_blocks: Lazy::new(&account_blocks)?,
+            rand_seed: collation_data.rand_seed,
+            ..Default::default()
+        };
+
+        //TODO: fill created_by
+        //extra.created_by = self.created_by.clone();
+        if let Some(mc_state_extra) = mc_state_extra {
+            let new_mc_block_extra = McBlockExtra {
+                shards: mc_state_extra.shards.clone(),
+                fees: collation_data.shard_fees.clone(),
+                //TODO: Signatures for previous blocks
+                prev_block_signatures: Default::default(),
+                mint_msg: collation_data
+                    .mint_msg
+                    .as_ref()
+                    .map(Lazy::new)
+                    .transpose()?,
+                recover_create_msg: collation_data
+                    .recover_create_msg
+                    .as_ref()
+                    .map(Lazy::new)
+                    .transpose()?,
+                copyleft_msgs: Default::default(),
+                config: if mc_state_extra.after_key_block {
+                    Some(mc_state_extra.config.clone())
+                } else {
+                    None
+                },
+            };
+
+            new_block_extra.custom = Some(Lazy::new(&new_mc_block_extra)?);
+        }
+
+        // construct block
+        let new_block = Block {
+            global_id,
+            info: Lazy::new(&new_block_info)?,
+            value_flow: Lazy::new(&value_flow)?,
+            state_update: Lazy::new(&state_update)?,
+            // do not use out msgs queue updates
+            out_msg_queue_updates: None,
+            extra: Lazy::new(&new_block_extra)?,
+        };
+        let new_block_root = CellBuilder::build_from(&new_block)?;
+        let new_block_boc = everscale_types::boc::Boc::encode(&new_block_root);
+        let new_block_id = BlockId {
+            shard: collation_data.block_id_short.shard,
+            seqno: collation_data.block_id_short.seqno,
+            root_hash: *new_block_root.repr_hash(),
+            file_hash: sha2::Sha256::digest(&new_block_boc).into(),
+        };
+
+        //TODO: build collated data from collation_data.shard_top_block_descriptors
+        let collated_data = vec![];
+
+        let block_candidate = BlockCandidate::new(
+            new_block_id,
+            new_block,
+            prev_shard_data.blocks_ids().clone(),
+            collation_data.top_shard_blocks_ids.clone(),
+            new_block_boc,
+            collated_data,
+            HashBytes::ZERO,
+            new_block_info.gen_utime as u64,
+        );
+
+        let new_state_stuff = ShardStateStuff::from_state_and_root(
+            new_block_id,
+            new_state,
+            new_state_root,
+            &self.state_tracker,
+        )?;
+
+        Ok((block_candidate, new_state_stuff))
+    }
+
+    fn create_mc_state_extra(
+        &self,
+        collation_data: &mut BlockCollationData,
+        new_config_opt: Option<BlockchainConfig>,
+    ) -> Result<(McStateExtra, u32)> {
+        let prev_shard_data = &self.working_state().prev_shard_data;
+        let prev_state = &prev_shard_data.observable_states()[0];
+
+        // 1. update config params and detect key block
+        let prev_state_extra = prev_state.state_extra()?;
+        let prev_config = &prev_state_extra.config;
+        let (config, is_key_block) = if let Some(new_config) = new_config_opt {
+            if !new_config.validate_params(true, None)? {
+                bail!(
+                    "configuration smart contract {} contains an invalid configuration in its data",
+                    new_config.address
+                );
+            }
+            let is_key_block = &new_config != prev_config;
+            (new_config, is_key_block)
+        } else {
+            (prev_config.clone(), false)
+        };
+
+        let current_chain_time = collation_data.chain_time;
+        let prev_chain_time = prev_state.state().gen_utime;
+
+        // 2. update shard_hashes and shard_fees
+        let cc_config = config.get_catchain_config()?;
+        let workchains = config.get_workchains()?;
+        // check if we need to start a new collation session for shards
+        let update_shard_cc = {
+            let lifetimes = current_chain_time / cc_config.shard_catchain_lifetime;
+            let prev_lifetimes = prev_chain_time / cc_config.shard_catchain_lifetime;
+            is_key_block || (lifetimes > prev_lifetimes)
+        };
+        let min_ref_mc_seqno =
+            self.update_shard_config(collation_data, &workchains, update_shard_cc)?;
+
+        // 3. save new shard_hashes
+        let shards_iter = collation_data
+            .shards()?
+            .iter()
+            .map(|(k, v)| (k, v.as_ref()));
+        let shards = ShardHashes::from_shards(shards_iter)?;
+
+        // 4. check extension flags
+        // prev_state_extra.flags is checked in the McStateExtra::load_from
+
+        // 5. update validator_info
+        //TODO: check `create_mc_state_extra()` for a reference implementation
+        //STUB: currently we do not use validator_info and just do nothing there
+        let validator_info = prev_state_extra.validator_info.clone();
+
+        // 6. update prev_blocks (add prev block's id to the dictionary)
+        let prev_is_key_block = collation_data.block_id_short.seqno == 1 // prev block is a keyblock if it is a zerostate
+            || prev_state_extra.after_key_block;
+        let mut prev_blocks = prev_state_extra.prev_blocks.clone();
+        let prev_blk_ref = BlockRef {
+            end_lt: prev_state.state().gen_lt,
+            seqno: prev_state.block_id().seqno,
+            root_hash: prev_state.block_id().root_hash,
+            file_hash: prev_state.block_id().file_hash,
+        };
+        //TODO: use AugDict::set when it is implemented
+        // prev_blocks.set(
+        //     &prev_state.block_id().seqno,
+        //     &KeyBlockRef {
+        //         is_key_block,
+        //         block_ref: prev_blk_ref.clone(),
+        //     },
+        //     &KeyMaxLt {
+        //         has_key_block: is_key_block,
+        //         max_end_lt: prev_state.state().gen_lt,
+        //     },
+        // )?;
+
+        // 7. update last_key_block
+        let last_key_block = if prev_state_extra.after_key_block {
+            Some(prev_blk_ref)
+        } else {
+            prev_state_extra.last_key_block.clone()
+        };
+
+        // 8. update global balance
+        let mut global_balance = prev_state_extra.global_balance.clone();
+        global_balance.try_add_assign(&collation_data.value_flow.created)?;
+        global_balance.try_add_assign(&collation_data.value_flow.minted)?;
+        global_balance.try_add_assign(&collation_data.shard_fees.root_extra().create)?;
+
+        // 9. update block creator stats
+        let block_create_stats = if prev_state_extra
+            .config
+            .get_global_version()?
+            .capabilities
+            .contains(GlobalCapability::CapCreateStatsEnabled)
+        {
+            let mut stats = prev_state_extra
+                .block_create_stats
+                .clone()
+                .unwrap_or_default();
+            self.update_block_creator_stats(collation_data, &mut stats)?;
+            Some(stats)
+        } else {
+            None
+        };
+
+        // 10. pack new McStateExtra
+        let mc_state_extra = McStateExtra {
+            shards,
+            config,
+            validator_info,
+            prev_blocks,
+            after_key_block: is_key_block,
+            last_key_block,
+            block_create_stats,
+            global_balance,
+            copyleft_rewards: Default::default(),
+        };
+
+        Ok((mc_state_extra, min_ref_mc_seqno))
+    }
+
+    fn update_shard_config(
+        &self,
+        collation_data: &mut BlockCollationData,
+        wc_set: &Dict<i32, WorkchainDescription>,
+        update_cc: bool,
+    ) -> Result<u32> {
+        //TODO: here should be the split/merge logic, refer to old node impl
+
+        //STUB: just do nothing for now: no split/merge, no session rotation
+        let mut min_ref_mc_seqno = u32::max_value();
+        for (_shard_id, shard_descr) in collation_data.shards_mut()? {
+            min_ref_mc_seqno = std::cmp::min(min_ref_mc_seqno, shard_descr.min_ref_mc_seqno);
+        }
+
+        Ok(min_ref_mc_seqno)
+    }
+
+    fn update_block_creator_stats(
+        &self,
+        collation_data: &BlockCollationData,
+        block_create_stats: &mut Dict<HashBytes, CreatorStats>,
+    ) -> Result<()> {
+        //TODO: implement if we really need it
+        //STUB: do not update anything
+        Ok(())
+    }
+
+    fn update_public_libraries(
+        &self,
+        mut libraries: Dict<HashBytes, LibDescr>,
+        accounts: &HashMap<HashBytes, ShardAccountStuff>,
+    ) -> Result<Dict<HashBytes, LibDescr>> {
+        for (_, acc) in accounts.iter() {
+            acc.update_public_libraries(&mut libraries)?;
+        }
+        Ok(libraries)
+    }
+
+    fn create_merkle_update(
+        collator_descr: &str,
+        prev_shard_data: &PrevData,
+        new_state_root: &Cell,
+        usage_tree: &UsageTree,
+    ) -> Result<MerkleUpdate> {
+        let timer = std::time::Instant::now();
+
+        let merkle_update_builder = MerkleUpdate::create(
+            prev_shard_data.pure_state_root().as_ref(),
+            new_state_root.as_ref(),
+            usage_tree,
+        );
+        let state_update = merkle_update_builder.build()?;
+
+        tracing::debug!(
+            "Collator ({}): merkle update created in {}ms",
+            collator_descr,
+            timer.elapsed().as_millis(),
+        );
+
+        // do not need to calc out_queue_updates
+
+        Ok(state_update)
+    }
+}
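
To keep the arithmetic in `finalize_block` easy to follow, its value-flow bookkeeping can be summarized as follows (informal notation in comments only; not part of the patch):

```rust
// Informal summary of the value flow assembled above:
//
//   imported       = in_msgs.root_extra().value_imported
//   exported       = out_msgs.root_extra()
//   fees_collected = account_blocks.root_extra()           // execution fees
//                  + in_msgs.root_extra().fees_collected   // import fees
//                  + fees_imported + created
//   to_next_block  = shard_accounts.root_extra().balance
//
// and the new state's totals:
//
//   total_balance        = to_next_block + fees_collected
//   total_validator_fees = prev.total_validator_fees - recovered
```
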
diff --git a/collator/src/collator/collator.rs b/collator/src/collator/collator.rs
index e85a8d8b6..a64e4c6c6 100644
--- a/collator/src/collator/collator.rs
+++ b/collator/src/collator/collator.rs
@@ -3,8 +3,8 @@ use std::sync::Arc;
 use anyhow::Result;
 use async_trait::async_trait;

-use everscale_types::models::{BlockId, BlockIdShort, ShardIdent};
-use tycho_block_util::state::ShardStateStuff;
+use everscale_types::models::{BlockId, BlockIdShort, BlockInfo, ShardIdent, ValueFlow};
+use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff};

 use crate::{
     mempool::{MempoolAdapter, MempoolAnchor},
@@ -12,7 +12,7 @@ use crate::{
     msg_queue::MessageQueueAdapter,
     state_node::StateNodeAdapter,
     tracing_targets,
-    types::{BlockCollationResult, CollationSessionId},
+    types::{BlockCollationResult, CollationConfig, CollationSessionId, CollationSessionInfo},
     utils::async_queued_dispatcher::{
         AsyncQueuedDispatcher, STANDARD_DISPATCHER_QUEUE_BUFFER_SIZE,
     },
@@ -20,24 +20,7 @@ use crate::{

 use super::collator_processor::CollatorProcessor;

-// EVENTS EMITTER AMD LISTENER
-
-//TODO: remove emitter
-#[async_trait]
-pub(crate) trait CollatorEventEmitter {
-    /// When there are no internals and an empty anchor was received from mempool
-    /// collator skips such anchor and notify listener. Manager may schedule
-    /// a master block collation when the corresponding interval elapsed
-    async fn on_skipped_empty_anchor_event(
-        &self,
-        shard_id: ShardIdent,
-        anchor: Arc<MempoolAnchor>,
-    ) -> Result<()>;
-    /// When new shard or master block was collated
-    async fn on_block_candidate_event(&self, collation_result: BlockCollationResult) -> Result<()>;
-    /// When collator was stopped
-    async fn on_collator_stopped_event(&self, stop_key: CollationSessionId) -> Result<()>;
-}
+// EVENTS LISTENER

 #[async_trait]
 pub(crate) trait CollatorEventListener: Send + Sync {
@@ -58,23 +41,34 @@ pub(crate) trait CollatorEventListener: Send + Sync {
 #[async_trait]
 pub(crate) trait Collator<MQ, MP, ST>: Send + Sync + 'static {
     //TODO: use factory that takes CollationManager and creates Collator impl
+    /// Create collator, start its tasks queue, and enqueue the first initialization task
     async fn start(
+        config: Arc<CollationConfig>,
+        collation_session: Arc<CollationSessionInfo>,
         listener: Arc<dyn CollatorEventListener>,
         mq_adapter: Arc<MQ>,
         mpool_adapter: Arc<MP>,
         state_node_adapter: Arc<ST>,
         shard_id: ShardIdent,
         prev_blocks_ids: Vec<BlockId>,
-        mc_state: Arc<ShardStateStuff>,
+        mc_state: ShardStateStuff,
+        state_tracker: MinRefMcStateTracker,
     ) -> Self;
     /// Enqueue collator stop task
     async fn equeue_stop(&self, stop_key: CollationSessionId) -> Result<()>;
+    /// Enqueue update of McData in working state and run attempt to collate shard block
+    async fn equeue_update_mc_data_and_resume_shard_collation(
+        &self,
+        mc_state: ShardStateStuff,
+    ) -> Result<()>;
+    /// Enqueue next attempt to collate block
+    async fn equeue_try_collate(&self) -> Result<()>;
     /// Enqueue new block collation
     async fn equeue_do_collate(
         &self,
         next_chain_time: u64,
-        top_shard_blocks_ids: Vec<BlockId>,
+        top_shard_blocks_info: Vec<(BlockId, BlockInfo, ValueFlow)>,
     ) -> Result<()>;
 }
@@ -102,13 +96,16 @@ where
     ST: StateNodeAdapter,
 {
     async fn start(
+        config: Arc<CollationConfig>,
+        collation_session: Arc<CollationSessionInfo>,
         listener: Arc<dyn CollatorEventListener>,
         mq_adapter: Arc<MQ>,
         mpool_adapter: Arc<MP>,
         state_node_adapter: Arc<ST>,
         shard_id: ShardIdent,
         prev_blocks_ids: Vec<BlockId>,
-        mc_state: Arc<ShardStateStuff>,
+        mc_state: ShardStateStuff,
+        state_tracker: MinRefMcStateTracker,
     ) -> Self {
         let max_prev_seqno = prev_blocks_ids.iter().map(|id| id.seqno).max().unwrap();
         let next_block_id = BlockIdShort {
@@ -126,12 +123,15 @@ where
         // create processor and run dispatcher for own tasks queue
         let processor = W::new(
             collator_descr.clone(),
+            config,
+            collation_session,
             dispatcher.clone(),
             listener,
             mq_adapter,
             mpool_adapter,
             state_node_adapter,
             shard_id,
+            state_tracker,
         );
         AsyncQueuedDispatcher::run(processor, receiver);
         tracing::trace!(target: tracing_targets::COLLATOR, "Tasks queue dispatcher started");
@@ -166,16 +166,34 @@ where
         todo!()
     }

+    async fn equeue_update_mc_data_and_resume_shard_collation(
+        &self,
+        mc_state: ShardStateStuff,
+    ) -> Result<()> {
+        self.dispatcher
+            .enqueue_task(method_to_async_task_closure!(
+                update_mc_data_and_resume_collation,
+                mc_state
+            ))
+            .await
+    }
+
+    async fn equeue_try_collate(&self) -> Result<()> {
+        self.dispatcher
+            .enqueue_task(method_to_async_task_closure!(try_collate_next_shard_block,))
+            .await
+    }
+
     async fn equeue_do_collate(
         &self,
         next_chain_time: u64,
-        top_shard_blocks_ids: Vec<BlockId>,
+        top_shard_blocks_info: Vec<(BlockId, BlockInfo, ValueFlow)>,
     ) -> Result<()> {
         self.dispatcher
             .enqueue_task(method_to_async_task_closure!(
                 do_collate,
                 next_chain_time,
-                top_shard_blocks_ids
+                top_shard_blocks_info
             ))
             .await
     }
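
For orientation, this is roughly how a caller would wire the widened `Collator::start` signature (the implementing type name `CollatorStdImpl` and all value names here are assumptions for illustration; only the parameter list comes from the patch):

```rust
// Hypothetical wiring of the new `Collator::start` signature.
let collator = CollatorStdImpl::start(
    config.clone(),            // Arc<CollationConfig>
    collation_session.clone(), // Arc<CollationSessionInfo>
    listener.clone(),          // Arc<dyn CollatorEventListener>
    mq_adapter.clone(),
    mpool_adapter.clone(),
    state_node_adapter.clone(),
    shard_id,
    prev_blocks_ids,           // Vec<BlockId>
    mc_state,                  // ShardStateStuff (no longer Arc-wrapped)
    state_tracker.clone(),     // MinRefMcStateTracker for locally built states
)
.await;
```
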
diff --git a/collator/src/collator/collator_processor.rs b/collator/src/collator/collator_processor.rs
index a1725c456..85bcf5dc7 100644
--- a/collator/src/collator/collator_processor.rs
+++ b/collator/src/collator/collator_processor.rs
@@ -4,41 +4,126 @@ use std::sync::Arc;
 use anyhow::Result;
 use async_trait::async_trait;

-use everscale_types::models::{BlockId, BlockIdShort, OwnedMessage, ShardIdent};
+use everscale_types::models::{
+    BlockId, BlockIdShort, BlockInfo, OwnedMessage, ShardIdent, ValueFlow,
+};

-use tycho_block_util::state::ShardStateStuff;
-use tycho_core::internal_queue::types::ext_types_stubs::EnqueuedMessage;
-use tycho_core::internal_queue::types::QueueDiff;
+use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff};

-use crate::mempool::{MempoolAnchor, MempoolAnchorId};
-use crate::msg_queue::{IterItem, QueueIterator};
 use crate::tracing_targets;
 use crate::{
-    mempool::MempoolAdapter,
+    mempool::{MempoolAdapter, MempoolAnchor, MempoolAnchorId},
     method_to_async_task_closure,
     msg_queue::MessageQueueAdapter,
     state_node::StateNodeAdapter,
-    types::{BlockCollationResult, CollationSessionId},
+    types::{CollationConfig, CollationSessionInfo},
     utils::async_queued_dispatcher::AsyncQueuedDispatcher,
 };

-use super::types::{McData, PrevData};
 use super::{
-    do_collate::DoCollate, types::WorkingState, CollatorEventEmitter, CollatorEventListener,
+    types::{McData, PrevData, WorkingState},
+    CollatorEventListener,
 };

+#[path = "./build_block.rs"]
+mod build_block;
+#[path = "./do_collate.rs"]
+mod do_collate;
+#[path = "./execution_manager.rs"]
+mod execution_manager;
+
 // COLLATOR PROCESSOR

 #[async_trait]
-pub(super) trait CollatorProcessor<MQ, QI, MP, ST>: DoCollate<MQ, QI, MP, ST>
+pub(super) trait CollatorProcessor<MQ, MP, ST>: Sized + Send + 'static {
+    fn new(
+        collator_descr: Arc<String>,
+        config: Arc<CollationConfig>,
+        collation_session: Arc<CollationSessionInfo>,
+        dispatcher: Arc<AsyncQueuedDispatcher<Self>>,
+        listener: Arc<dyn CollatorEventListener>,
+        mq_adapter: Arc<MQ>,
+        mpool_adapter: Arc<MP>,
+        state_node_adapter: Arc<ST>,
+        shard_id: ShardIdent,
+        state_tracker: MinRefMcStateTracker,
+    ) -> Self;
+
+    // Initialize collator working state then run collation
+    async fn init(
+        &mut self,
+        prev_blocks_ids: Vec<BlockId>,
+        mc_state: ShardStateStuff,
+    ) -> Result<()>;
+
+    /// Update McData in working state
+    /// and enqueue next attempt to collate block
+    async fn update_mc_data_and_resume_collation(
+        &mut self,
+        mc_state: ShardStateStuff,
+    ) -> Result<()>;
+
+    /// Attempt to collate next shard block
+    /// 1. Run collation if there are internals or pending externals from previously imported anchors
+    /// 2. Otherwise request next anchor with externals
+    /// 3. If no internals or externals then notify manager about skipped empty anchor
+    async fn try_collate_next_shard_block(&mut self) -> Result<()>;
+
+    /// Collate one block
+    async fn do_collate(
+        &mut self,
+        next_chain_time: u64,
+        top_shard_blocks_info: Vec<(BlockId, BlockInfo, ValueFlow)>,
+    ) -> Result<()>;
+}
+
+#[async_trait]
+impl<MQ, MP, ST> CollatorProcessor<MQ, MP, ST> for CollatorProcessorStdImpl<MQ, MP, ST>
 where
+    MQ: MessageQueueAdapter,
+    MP: MempoolAdapter,
     ST: StateNodeAdapter,
 {
+    fn new(
+        collator_descr: Arc<String>,
+        config: Arc<CollationConfig>,
+        collation_session: Arc<CollationSessionInfo>,
+        dispatcher: Arc<AsyncQueuedDispatcher<Self>>,
+        listener: Arc<dyn CollatorEventListener>,
+        mq_adapter: Arc<MQ>,
+        mpool_adapter: Arc<MP>,
+        state_node_adapter: Arc<ST>,
+        shard_id: ShardIdent,
+        state_tracker: MinRefMcStateTracker,
+    ) -> Self {
+        Self {
+            collator_descr,
+            config,
+            collation_session,
+            dispatcher,
+            listener,
+            mq_adapter,
+            mpool_adapter,
+            state_node_adapter,
+            shard_id,
+            working_state: None,
+
+            anchors_cache: BTreeMap::new(),
+            last_imported_anchor_id: None,
+            last_imported_anchor_chain_time: None,
+
+            externals_read_upto: BTreeMap::new(),
+            has_pending_externals: false,
+
+            state_tracker,
+        }
+    }
+
     // Initialize collator working state then run collation
     async fn init(
         &mut self,
         prev_blocks_ids: Vec<BlockId>,
-        mc_state: Arc<ShardStateStuff>,
+        mc_state: ShardStateStuff,
     ) -> Result<()> {
         tracing::info!(target: tracing_targets::COLLATOR, "Collator init ({}): processing...", self.collator_descr());

@@ -47,8 +132,8 @@ where
         // load states
         tracing::info!(target: tracing_targets::COLLATOR, "Collator init ({}): loading initial shard state...", self.collator_descr());
         let (mc_state, prev_states) = Self::load_init_states(
-            self.get_state_node_adapter(),
-            *self.shard_id(),
+            self.state_node_adapter.clone(),
+            self.shard_id,
             prev_blocks_ids.clone(),
             mc_state,
         )
@@ -60,16 +145,12 @@ where
             Self::build_and_validate_working_state(mc_state, prev_states, prev_blocks_ids.clone())?;
         self.set_working_state(working_state);

-        //TODO: fix work with internals, currently do not init mq iterator because do not need to integrate mq
-        // init message queue iterator
-        //self.init_mq_iterator().await?;
-
         // master block collations will be called by the collation manager directly

         // enqueue collation attempt of next shard block
-        if !self.shard_id().is_masterchain() {
-            self.get_dispatcher()
-                .enqueue_task(method_to_async_task_closure!(try_collate,))
+        if !self.shard_id.is_masterchain() {
+            self.dispatcher
+                .enqueue_task(method_to_async_task_closure!(try_collate_next_shard_block,))
                 .await?;
             tracing::info!(target: tracing_targets::COLLATOR, "Collator init ({}): collation attempt enqueued", self.collator_descr());
         }
@@ -79,70 +160,18 @@ where
         Ok(())
     }

-    /// Load required initial states:
-    /// master state + list of previous shard states
-    async fn load_init_states(
-        state_node_adapter: Arc<ST>,
-        shard_id: ShardIdent,
-        prev_blocks_ids: Vec<BlockId>,
-        mc_state: Arc<ShardStateStuff>,
-    ) -> Result<(Arc<ShardStateStuff>, Vec<Arc<ShardStateStuff>>)> {
-        // if current shard is a masterchain then can take current master state
-        if shard_id.is_masterchain() {
-            return Ok((mc_state.clone(), vec![mc_state]));
-        }
-
-        // otherwise await prev states by prev block ids
-        let mut prev_states = vec![];
-        for prev_block_id in prev_blocks_ids {
-            // request state for prev block and wait for response
-            let state = state_node_adapter
-                .request_state(prev_block_id)
-                .await?
-                .try_recv()
-                .await?;
-            tracing::info!(
-                target: tracing_targets::COLLATOR,
-                "To init working state loaded prev shard state for prev_block_id {}",
-                prev_block_id.as_short_id(),
-            );
-            prev_states.push(state);
-        }
-
-        Ok((mc_state, prev_states))
-    }
-
-    /// Build working state structure:
-    /// * master state
-    /// * observable previous state
-    /// * usage tree that tracks data access to state cells
-    ///
-    /// Perform some validations on state
-    fn build_and_validate_working_state(
-        mc_state: Arc<ShardStateStuff>,
-        prev_states: Vec<Arc<ShardStateStuff>>,
-        prev_blocks_ids: Vec<BlockId>,
-    ) -> Result<WorkingState> {
-        //TODO: make real implementation
-
-        let mc_data = McData::new(mc_state)?;
-        let (prev_shard_data, usage_tree) =
-            PrevData::build(&mc_data, &prev_states, prev_blocks_ids)?;
-
-        let working_state = WorkingState {
-            mc_data,
-            prev_shard_data,
-            usage_tree,
-        };
+    async fn update_mc_data_and_resume_collation(
+        &mut self,
+        mc_state: ShardStateStuff,
+    ) -> Result<()> {
+        self.update_mc_data(mc_state)?;

-        Ok(working_state)
+        self.dispatcher
+            .enqueue_task(method_to_async_task_closure!(try_collate_next_shard_block,))
+            .await
     }

-    /// Attempt to collate next shard block
-    /// 1. Run collation if there are internals or pending externals from previously imported anchors
-    /// 2. Otherwise request next anchor with externals
-    /// 3. If no internals or externals then notify manager about skipped empty anchor
-    async fn try_collate(&mut self) -> Result<()> {
+    async fn try_collate_next_shard_block(&mut self) -> Result<()> {
         tracing::trace!(
             target: tracing_targets::COLLATOR,
             "Collator ({}): checking if can collate next block",
@@ -152,7 +181,7 @@ where

         //TODO: fix the work with internals
         // check internals
-        let has_internals = self.mq_iterator_has_next();
+        let has_internals = self.has_internals()?;
         if has_internals {
             tracing::debug!(
                 target: tracing_targets::COLLATOR,
@@ -164,7 +193,7 @@ where
         // check pending externals
         let mut has_externals = true;
         if !has_internals {
-            has_externals = self.has_pending_externals();
+            has_externals = self.has_pending_externals;
             if has_externals {
                 tracing::debug!(
                     target: tracing_targets::COLLATOR,
@@ -200,7 +229,7 @@ where
         // queue collation if has internals or externals
         if has_internals || has_externals {
             let next_chain_time = self.get_last_imported_anchor_chain_time();
-            self.get_dispatcher()
+            self.dispatcher
                 .enqueue_task(method_to_async_task_closure!(
                     do_collate,
                     next_chain_time,
@@ -215,89 +244,45 @@ where
         } else {
             // notify manager when next anchor was imported but it does not contain externals
             if let Some(anchor) = next_anchor {
+                // this will initiate master block collation or next shard block collation attempt
                 tracing::debug!(
                     target: tracing_targets::COLLATOR,
                     "Collator ({}): just imported anchor has no externals, will notify collation manager",
                     self.collator_descr(),
                 );
-                self.on_skipped_empty_anchor_event(*self.shard_id(), anchor)
+                self.listener
+                    .on_skipped_empty_anchor(self.shard_id, anchor)
+                    .await?;
+            } else {
+                // otherwise enqueue next shard block collation attempt right now
+                self.dispatcher
+                    .enqueue_task(method_to_async_task_closure!(try_collate_next_shard_block,))
                     .await?;
             }
         }

-        // finally enqueue next collation attempt
-        // which will be processed right after current one
-        // or after previously scheduled collation
-        self.get_dispatcher()
-            .enqueue_task(method_to_async_task_closure!(try_collate,))
-            .await
+        Ok(())
     }
-}

-#[async_trait]
-impl<MQ, QI, MP, ST> CollatorProcessor<MQ, QI, MP, ST> for CollatorProcessorStdImpl<MQ, QI, MP, ST>
-where
-    MQ: MessageQueueAdapter,
-    QI: QueueIterator + Send + Sync + 'static,
-    MP: MempoolAdapter,
-    ST: StateNodeAdapter,
-{
+    async fn do_collate(
+        &mut self,
+        next_chain_time: u64,
+        top_shard_blocks_info: Vec<(BlockId, BlockInfo, ValueFlow)>,
+    ) -> Result<()> {
+        self.do_collate_impl(next_chain_time, top_shard_blocks_info)
+            .await
+    }
 }

-/// Trait declares functions that need specific implementation.
-/// For test purposes you can re-implement only this trait.
-#[async_trait]
-pub(super) trait CollatorProcessorSpecific<MQ, QI, MP, ST>: Sized {
-    fn new(
-        collator_descr: Arc<String>,
-        dispatcher: Arc<AsyncQueuedDispatcher<Self>>,
-        listener: Arc<dyn CollatorEventListener>,
-        mq_adapter: Arc<MQ>,
-        mpool_adapter: Arc<MP>,
-        state_node_adapter: Arc<ST>,
-        shard_id: ShardIdent,
-    ) -> Self;
-
-    fn collator_descr(&self) -> &str;
-
-    fn shard_id(&self) -> &ShardIdent;
-
-    fn get_dispatcher(&self) -> Arc<AsyncQueuedDispatcher<Self>>;
-
-    fn get_mq_adapter(&self) -> Arc<MQ>;
-
-    fn get_state_node_adapter(&self) -> Arc<ST>;
-
-    fn working_state(&self) -> &WorkingState;
-    fn set_working_state(&mut self, working_state: WorkingState);
-    fn update_working_state(&mut self, new_prev_block_id: BlockId) -> Result<()>;
-
-    async fn init_mq_iterator(&mut self) -> Result<()>;
-
-    fn mq_iterator_has_next(&self) -> bool;
-    fn mq_iterator_next(&mut self) -> Option<IterItem>;
-    fn mq_iterator_commit(&mut self);
-    fn mq_iterator_get_diff(&self, block_id_short: BlockIdShort) -> QueueDiff;
-    fn mq_iterator_add_message(&mut self, message: Arc<EnqueuedMessage>) -> Result<()>;
-
-    async fn import_next_anchor(&mut self) -> Result<Arc<MempoolAnchor>>;
-    fn last_imported_anchor_id(&self) -> Option<&MempoolAnchorId>;
-    fn get_last_imported_anchor_chain_time(&self) -> u64;
-
-    /// TRUE - when exist imported anchors in cache and not all their externals were processed
-    fn has_pending_externals(&self) -> bool;
-    fn set_has_pending_externals(&mut self, value: bool);
+pub(crate) struct CollatorProcessorStdImpl<MQ, MP, ST> {
+    collator_descr: Arc<String>,

-    /// (TODO) Should consider parallel processing for different accounts
-    fn get_next_external(&mut self) -> Option<Arc<OwnedMessage>>;
-}
+    config: Arc<CollationConfig>,
+    collation_session: Arc<CollationSessionInfo>,

-pub(crate) struct CollatorProcessorStdImpl<MQ, QI, MP, ST> {
-    collator_descr: Arc<String>,
     dispatcher: Arc<AsyncQueuedDispatcher<Self>>,
     listener: Arc<dyn CollatorEventListener>,
     mq_adapter: Arc<MQ>,
-    mq_iterator: Option<QI>,
     mpool_adapter: Arc<MP>,
     state_node_adapter: Arc<ST>,
     shard_id: ShardIdent,
@@ -319,66 +304,21 @@ pub(crate) struct CollatorProcessorStdImpl<MQ, QI, MP, ST> {
     ///
     /// Updated in the `get_next_external()` method
     has_pending_externals: bool,
+
+    /// State tracker for creating ShardStateStuff locally
+    state_tracker: MinRefMcStateTracker,
 }

-#[async_trait]
-impl<MQ, QI, MP, ST> CollatorProcessorSpecific<MQ, QI, MP, ST>
-    for CollatorProcessorStdImpl<MQ, QI, MP, ST>
+impl<MQ, MP, ST> CollatorProcessorStdImpl<MQ, MP, ST>
 where
     MQ: MessageQueueAdapter,
-    QI: QueueIterator + Send,
     MP: MempoolAdapter,
     ST: StateNodeAdapter,
 {
-    fn new(
-        collator_descr: Arc<String>,
-        dispatcher: Arc<AsyncQueuedDispatcher<Self>>,
-        listener: Arc<dyn CollatorEventListener>,
-        mq_adapter: Arc<MQ>,
-        mpool_adapter: Arc<MP>,
-        state_node_adapter: Arc<ST>,
-        shard_id: ShardIdent,
-    ) -> Self {
-        Self {
-            collator_descr,
-            dispatcher,
-            listener,
-            mq_adapter,
-            mq_iterator: None,
-            mpool_adapter,
-            state_node_adapter,
-            shard_id,
-            working_state: None,
-
-            anchors_cache: BTreeMap::new(),
-            last_imported_anchor_id: None,
-            last_imported_anchor_chain_time: None,
-
-            externals_read_upto: BTreeMap::new(),
-            has_pending_externals: false,
-        }
-    }
-
     fn collator_descr(&self) -> &str {
         &self.collator_descr
     }

-    fn shard_id(&self) -> &ShardIdent {
-        &self.shard_id
-    }
-
-    fn get_dispatcher(&self) -> Arc<AsyncQueuedDispatcher<Self>> {
-        self.dispatcher.clone()
-    }
-
-    fn get_mq_adapter(&self) -> Arc<MQ> {
-        self.mq_adapter.clone()
-    }
-
-    fn get_state_node_adapter(&self) -> Arc<ST> {
-        self.state_node_adapter.clone()
-    }
-
     fn working_state(&self) -> &WorkingState {
         self.working_state
             .as_ref()
@@ -388,25 +328,33 @@ where
         self.working_state = Some(working_state);
     }

-    ///(TODO) Update working state from new state after block collation
-    ///
-    ///STUB: currently have stub signature and implementation
-    fn update_working_state(&mut self, new_prev_block_id: BlockId) -> Result<()> {
-        let new_next_block_id = BlockIdShort {
-            shard: new_prev_block_id.shard,
-            seqno: new_prev_block_id.seqno + 1,
+    /// Update working state from new block and state after block collation
+    fn update_working_state(&mut self, new_state_stuff: ShardStateStuff) -> Result<()> {
+        let new_next_block_id_short = BlockIdShort {
+            shard: new_state_stuff.block_id().shard,
+            seqno: new_state_stuff.block_id().seqno + 1,
         };
-        let new_collator_descr = format!("next block: {}", new_next_block_id);
+        let new_collator_descr = format!("next block: {}", new_next_block_id_short);

-        self.working_state
+        let working_state_mut = self
+            .working_state
             .as_mut()
-            .expect("should `init` collator before calling `update_working_state`")
-            .prev_shard_data
-            .update_state(vec![new_prev_block_id])?;
+            .expect("should `init` collator before calling `update_working_state`");
+
+        if new_state_stuff.block_id().shard.is_masterchain() {
+            let new_mc_data = McData::build(new_state_stuff.clone())?;
+            working_state_mut.mc_data = new_mc_data;
+        }
+
+        let prev_states = vec![new_state_stuff];
+        Self::check_prev_states_and_master(&working_state_mut.mc_data, &prev_states)?;
+        let (new_prev_shard_data, usage_tree) = PrevData::build(prev_states)?;
+        working_state_mut.prev_shard_data = new_prev_shard_data;
+        working_state_mut.usage_tree = usage_tree;

         tracing::debug!(
             target: tracing_targets::COLLATOR,
-            "Collator ({}): STUB: working state updated from just collated block...",
+            "Collator ({}): working state updated from just collated block",
             self.collator_descr(),
         );

@@ -415,28 +363,93 @@ where
         Ok(())
     }

-    async fn init_mq_iterator(&mut self) -> Result<()> {
-        let mq_iterator = self.mq_adapter.get_iterator(&self.shard_id).await?;
-        self.mq_iterator = Some(mq_iterator);
+    /// Update McData in working state
+    fn update_mc_data(&mut self, mc_state: ShardStateStuff) -> Result<()> {
+        let mc_state_block_id = mc_state.block_id().as_short_id();
+
+        let new_mc_data = McData::build(mc_state)?;
+
+        let working_state_mut = self
+            .working_state
+            .as_mut()
+            .expect("should `init` collator before calling `update_mc_data`");
+
+        working_state_mut.mc_data = new_mc_data;
+
+        tracing::debug!(
+            target: tracing_targets::COLLATOR,
+            "Collator ({}): McData updated in working state from new master state on {}",
+            self.collator_descr(),
+            mc_state_block_id,
+        );
+
         Ok(())
     }

-    fn mq_iterator_has_next(&self) -> bool {
-        //TODO: make real implementation
-        //STUB: always return false emulating that all internals were processed in prev block
-        false
-    }
-    fn mq_iterator_next(&mut self) -> Option<IterItem> {
-        todo!()
-    }
-    fn mq_iterator_commit(&mut self) {
-        todo!()
+    /// Load required initial states:
+    /// master state + list of previous shard states
+    async fn load_init_states(
+        state_node_adapter: Arc<ST>,
+        shard_id: ShardIdent,
+        prev_blocks_ids: Vec<BlockId>,
+        mc_state: ShardStateStuff,
+    ) -> Result<(ShardStateStuff, Vec<ShardStateStuff>)> {
+        // if current shard is a masterchain then can take current master state
+        if shard_id.is_masterchain() {
+            return Ok((mc_state.clone(), vec![mc_state]));
+        }
+
+        // otherwise await prev states by prev block ids
+        let mut prev_states = vec![];
+        for prev_block_id in prev_blocks_ids {
+            // request state for prev block and wait for response
+            let state = state_node_adapter.load_state(&prev_block_id).await?;
+            tracing::info!(
+                target: tracing_targets::COLLATOR,
+                "To init working state loaded prev shard state for prev_block_id {}",
+                prev_block_id.as_short_id(),
+            );
+            prev_states.push(state);
+        }
+
+        Ok((mc_state, prev_states))
     }
-    fn mq_iterator_get_diff(&self, _block_id_short: BlockIdShort) -> QueueDiff {
-        todo!()
+
+    /// Build working state structure:
+    /// * master state
+    /// * observable previous state
+    /// * usage tree that tracks data access to state cells
+    ///
+    /// Perform some validations on state
+    fn build_and_validate_working_state(
+        mc_state: ShardStateStuff,
+        prev_states: Vec<ShardStateStuff>,
+        prev_blocks_ids: Vec<BlockId>,
+    ) -> Result<WorkingState> {
+        //TODO: make real implementation
+
+        let mc_data = McData::build(mc_state)?;
+        Self::check_prev_states_and_master(&mc_data, &prev_states)?;
+        let (prev_shard_data, usage_tree) = PrevData::build(prev_states)?;
+
+        let working_state = WorkingState {
+            mc_data,
+            prev_shard_data,
+            usage_tree,
+        };
+
+        Ok(working_state)
     }
-    fn mq_iterator_add_message(&mut self, _message: Arc<EnqueuedMessage>) -> Result<()> {
-        todo!()
+
+    /// (TODO) Perform some checks on master state and prev states
+    fn check_prev_states_and_master(
+        _mc_data: &McData,
+        _prev_states: &[ShardStateStuff],
+    ) -> Result<()> {
+        //TODO: make real implementation
+        // refer to the old node impl:
+        // Collator::unpack_last_state()
+        Ok(())
     }

     /// 1. (TODO) Get last imported anchor from cache or last processed from `externals_processed_upto`
@@ -470,20 +483,11 @@ where
         Ok(next_anchor)
     }

-    fn last_imported_anchor_id(&self) -> Option<&MempoolAnchorId> {
-        self.last_imported_anchor_id.as_ref()
-    }
     fn get_last_imported_anchor_chain_time(&self) -> u64 {
         self.last_imported_anchor_chain_time.unwrap()
     }

-    fn has_pending_externals(&self) -> bool {
-        self.has_pending_externals
-    }
-    fn set_has_pending_externals(&mut self, value: bool) {
-        self.has_pending_externals = value;
-    }
-
+    /// (TODO) Should consider parallel processing for different accounts
     fn get_next_external(&mut self) -> Option<Arc<OwnedMessage>> {
         //TODO: make real implementation

@@ -492,29 +496,11 @@ where
         None
     }
-}

-#[async_trait]
-impl<MQ, QI, MP, ST> CollatorEventEmitter for CollatorProcessorStdImpl<MQ, QI, MP, ST>
-where
-    MQ: MessageQueueAdapter,
-    QI: QueueIterator + Send + Sync + 'static,
-    MP: MempoolAdapter,
-    ST: StateNodeAdapter,
-{
-    async fn on_skipped_empty_anchor_event(
-        &self,
-        shard_id: ShardIdent,
-        anchor: Arc<MempoolAnchor>,
-    ) -> Result<()> {
-        self.listener
-            .on_skipped_empty_anchor(shard_id, anchor)
-            .await
-    }
-    async fn on_block_candidate_event(&self, collation_result: BlockCollationResult) -> Result<()> {
-        self.listener.on_block_candidate(collation_result).await
-    }
-    async fn on_collator_stopped_event(&self, stop_key: CollationSessionId) -> Result<()> {
-        self.listener.on_collator_stopped(stop_key).await
+    /// (TODO) TRUE - when internal messages queue has internals
+    fn has_internals(&self) -> Result<bool> {
+        //TODO: make real implementation
+        //STUB: always return false emulating that all internals were processed in prev block
+        Ok(false)
     }
 }
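
Condensed from the code above, the decision flow of `try_collate_next_shard_block` is (comments only; not part of the patch):

```rust
// Decision flow of `try_collate_next_shard_block`:
//
//   if has_internals()?            -> enqueue do_collate (internals first)
//   else if has_pending_externals  -> enqueue do_collate (externals from cached anchors)
//   else import_next_anchor():
//       anchor with externals      -> enqueue do_collate
//       empty anchor               -> listener.on_skipped_empty_anchor(...)
//                                     (lets the manager schedule a master block)
//       no anchor                  -> re-enqueue try_collate_next_shard_block
```
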
diff --git a/collator/src/collator/do_collate.rs b/collator/src/collator/do_collate.rs
index e555b4407..47d1638d2 100644
--- a/collator/src/collator/do_collate.rs
+++ b/collator/src/collator/do_collate.rs
@@ -1,56 +1,54 @@
-use anyhow::Result;
-use async_trait::async_trait;
+use std::collections::HashMap;
+
+use anyhow::{anyhow, bail, Result};
+use sha2::Digest;
+
 use everscale_types::{
-    cell::CellBuilder,
-    models::{BlockId, BlockIdShort},
+    cell::HashBytes,
+    models::{
+        BlockId, BlockIdShort, BlockInfo, ConfigParam7, CurrencyCollection, ShardDescription,
+        ValueFlow,
+    },
+    num::Tokens,
 };

 use crate::{
+    collator::{
+        collator_processor::execution_manager::ExecutionManager,
+        types::{BlockCollationData, McData, OutMsgQueueInfoStuff, PrevData, ShardDescriptionExt},
+    },
     mempool::MempoolAdapter,
-    msg_queue::{MessageQueueAdapter, QueueIterator},
+    msg_queue::MessageQueueAdapter,
     state_node::StateNodeAdapter,
     tracing_targets,
-    types::{BlockCandidate, BlockCollationResult},
+    types::BlockCollationResult,
 };

-use super::{
-    collator_processor::{CollatorProcessorSpecific, CollatorProcessorStdImpl},
-    CollatorEventEmitter,
-};
+use super::CollatorProcessorStdImpl;

-#[async_trait]
-pub(super) trait DoCollate<MQ, QI, MP, ST>:
-    CollatorProcessorSpecific<MQ, QI, MP, ST> + CollatorEventEmitter + Sized + Send + Sync + 'static
-{
-    async fn do_collate(
-        &mut self,
-        next_chain_time: u64,
-        top_shard_blocks_ids: Vec<BlockId>,
-    ) -> Result<()>;
-}
-
-#[async_trait]
-impl<MQ, QI, MP, ST> DoCollate<MQ, QI, MP, ST> for CollatorProcessorStdImpl<MQ, QI, MP, ST>
+impl<MQ, MP, ST> CollatorProcessorStdImpl<MQ, MP, ST>
 where
     MQ: MessageQueueAdapter,
-    QI: QueueIterator + Send + Sync + 'static,
     MP: MempoolAdapter,
     ST: StateNodeAdapter,
 {
-    async fn do_collate(
+    pub(super) async fn do_collate_impl(
         &mut self,
-        mut next_chain_time: u64,
-        top_shard_blocks_ids: Vec<BlockId>,
+        next_chain_time: u64,
+        top_shard_blocks_info: Vec<(BlockId, BlockInfo, ValueFlow)>,
     ) -> Result<()> {
         //TODO: make real implementation

-        let _tracing_top_shard_blocks_descr = if top_shard_blocks_ids.is_empty() {
+        let mc_data = &self.working_state().mc_data;
+        let prev_shard_data = &self.working_state().prev_shard_data;
+
+        let _tracing_top_shard_blocks_descr = if top_shard_blocks_info.is_empty() {
             "".to_string()
         } else {
             format!(
                 ", top_shard_blocks: {:?}",
-                top_shard_blocks_ids
+                top_shard_blocks_info
                     .iter()
-                    .map(|id| id.as_short_id().to_string())
+                    .map(|(id, _, _)| id.as_short_id().to_string())
                     .collect::<Vec<_>>()
                     .as_slice(),
             )
@@ -63,12 +61,100 @@ where
             next_chain_time,
         );

+        // generate seed from the chain_time of the anchor
+        let hash_bytes = sha2::Sha256::digest(next_chain_time.to_be_bytes());
+        let rand_seed = HashBytes::from_slice(hash_bytes.as_slice());
+        tracing::trace!(
+            target: tracing_targets::COLLATOR,
+            "Collator ({}{}): next chain time: {}: rand_seed from chain time: {}",
+            self.collator_descr(),
+            _tracing_top_shard_blocks_descr,
+            next_chain_time,
+            rand_seed,
+        );
+
+        // prepare block collation data
+        //STUB: consider split/merge in future for taking prev_block_id
+        let prev_block_id = prev_shard_data.blocks_ids()[0];
+        let mut collation_data = BlockCollationData::default();
+        collation_data.block_id_short = BlockIdShort {
+            shard: prev_block_id.shard,
+            seqno: prev_block_id.seqno + 1,
+        };
+        collation_data.rand_seed = rand_seed;
+
+        // init ShardHashes descriptions for master
+        if collation_data.block_id_short.shard.is_masterchain() {
+            let mut shards = HashMap::new();
+            for (top_block_id, top_block_info, top_block_value_flow) in top_shard_blocks_info {
+                let mut shard_descr = ShardDescription::from_block_info(
+                    top_block_id,
+                    &top_block_info,
+                    &top_block_value_flow,
+                );
+                shard_descr.reg_mc_seqno = collation_data.block_id_short.seqno;
+
+                collation_data.update_shards_max_end_lt(shard_descr.end_lt);
+
+                shards.insert(top_block_id.shard, Box::new(shard_descr));
+                collation_data.top_shard_blocks_ids.push(top_block_id);
+            }
+            collation_data.set_shards(shards);
+
+            //TODO: setup ShardFees and update `collation_data.value_flow.fees_*`
+        }
+
+        collation_data.update_ref_min_mc_seqno(mc_data.mc_state_stuff().state().seqno);
+        collation_data.chain_time = next_chain_time as u32;
+        collation_data.start_lt = Self::calc_start_lt(
+            self.collator_descr(),
+            mc_data,
+            prev_shard_data,
+            &collation_data,
+        )?;
+        collation_data.max_lt = collation_data.start_lt + 1;
+
+        //TODO: should consider split/merge in future
+        let out_msg_queue_info = prev_shard_data.observable_states()[0]
+            .state()
+            .load_out_msg_queue_info()
+            .unwrap_or_default(); //TODO: should not fail here
+        collation_data.out_msg_queue_stuff = OutMsgQueueInfoStuff {
+            proc_info: out_msg_queue_info.proc_info,
+        };
+        collation_data.externals_processed_upto = prev_shard_data.observable_states()[0]
+            .state()
+            .externals_processed_upto
+            .clone();
+
+        // compute created / minted / recovered / from_prev_block
+        self.update_value_flow(mc_data, prev_shard_data, &mut collation_data)?;
+
+        // init execution manager
+        let exec_manager = ExecutionManager::new(
+            collation_data.chain_time,
+            collation_data.start_lt,
+            collation_data.max_lt,
+            collation_data.rand_seed,
+            mc_data.mc_state_stuff().state().libraries.clone(),
+            mc_data.config().clone(),
+            self.config.max_collate_threads,
+        );
+
         //STUB: just remove first anchor from cache
         let _ext_msg = self.get_next_external();
-        self.set_has_pending_externals(false);
+        self.has_pending_externals = false;

+        //STUB: do not execute transactions and produce empty block
+
+        // build block candidate and new state
+        let (candidate, new_state_stuff) = self
+            .finalize_block(&mut collation_data, exec_manager)
+            .await?;
+
+        /*
         //STUB: just send dummy block to collation manager
-        let prev_blocks_ids = self.working_state().prev_shard_data.blocks_ids().clone();
+        let prev_blocks_ids = prev_shard_data.blocks_ids().clone();
         let prev_block_id = prev_blocks_ids[0];
         let collated_block_id_short = BlockIdShort {
             shard: prev_block_id.shard,
@@ -87,32 +173,193 @@ where
             root_hash: *hash,
             file_hash: *hash,
         };
-        let mut new_state = self.working_state().prev_shard_data.pure_states()[0]
+        let mut new_state = prev_shard_data.pure_states()[0]
             .state()
             .clone();
         new_state.seqno = collated_block_id.seqno;
+        let candidate = BlockCandidate::new(
+            collated_block_id,
+            prev_blocks_ids,
+            top_shard_blocks_ids,
+            vec![],
+            vec![],
+            collated_block_id.file_hash,
+            next_chain_time,
+        );
+        */
+
         let collation_result = BlockCollationResult {
-            candidate: BlockCandidate::new(
-                collated_block_id,
-                prev_blocks_ids,
-                top_shard_blocks_ids,
-                vec![],
-                vec![],
-                collated_block_id.file_hash,
-                next_chain_time,
-            ),
-            new_state,
+            candidate,
+            new_state_stuff: new_state_stuff.clone(),
         };
-        self.on_block_candidate_event(collation_result).await?;
+        self.listener.on_block_candidate(collation_result).await?;
         tracing::info!(
             target: tracing_targets::COLLATOR,
-            "Collator ({}{}): STUB: created and sent dummy block candidate...",
+            "Collator ({}{}): STUB: created and sent empty block candidate...",
             self.collator_descr(),
             _tracing_top_shard_blocks_descr,
         );

-        self.update_working_state(collated_block_id)?;
+        self.update_working_state(new_state_stuff)?;

         Ok(())
     }

+    fn calc_start_lt(
+        collator_descr: &str,
+        mc_data: &McData,
+        prev_shard_data: &PrevData,
+        collation_data: &BlockCollationData,
+    ) -> Result<u64> {
+        tracing::trace!("Collator ({}): calc_start_lt()", collator_descr);
+
+        let mut start_lt = if !collation_data.block_id_short.shard.is_masterchain() {
+            std::cmp::max(
+                mc_data.mc_state_stuff().state().gen_lt,
+                prev_shard_data.gen_lt(),
+            )
+        } else {
+            std::cmp::max(
+                mc_data.mc_state_stuff().state().gen_lt,
+                collation_data.shards_max_end_lt(),
+            )
+        };
+
+        let align = mc_data.get_lt_align();
+        let incr = align - start_lt % align;
+        if incr < align || 0 == start_lt {
+            if start_lt >= (!incr + 1) {
+                bail!("cannot compute start logical time (uint64 overflow)");
+            }
+            start_lt += incr;
+        }
+
+        tracing::debug!(
+            "Collator ({}): start_lt set to {}",
+            collator_descr,
+            start_lt,
+        );
+
+        Ok(start_lt)
+    }
+
+    fn update_value_flow(
+        &self,
+        mc_data: &McData,
+        prev_shard_data: &PrevData,
+        collation_data: &mut BlockCollationData,
+    ) -> Result<()> {
+        tracing::trace!("Collator ({}): update_value_flow()", self.collator_descr);
+
+        if collation_data.block_id_short.shard.is_masterchain() {
+            collation_data.value_flow.created.tokens =
+                mc_data.config().get_block_creation_reward(true)?;
+            collation_data.value_flow.recovered = collation_data.value_flow.created.clone();
+            collation_data
+                .value_flow
+                .recovered
+                .try_add_assign(&collation_data.value_flow.fees_collected)?;
+            collation_data
+                .value_flow
+                .recovered
+                .try_add_assign(&mc_data.mc_state_stuff().state().total_validator_fees)?;
+
+            match mc_data.config().get_fee_collector_address() {
+                Err(_) => {
+                    tracing::debug!(
+                        "Collator ({}): fee recovery disabled (no collector smart contract defined in configuration)",
+                        self.collator_descr,
+                    );
+                    collation_data.value_flow.recovered = CurrencyCollection::default();
+                }
+                Ok(_addr) => {
+                    if collation_data.value_flow.recovered.tokens < Tokens::new(1_000_000_000) {
+                        tracing::debug!(
+                            "Collator({}): fee recovery skipped ({:?})",
+                            self.collator_descr,
+                            collation_data.value_flow.recovered,
+                        );
+                        collation_data.value_flow.recovered = CurrencyCollection::default();
+                    }
+                }
+            };
+
+            collation_data.value_flow.minted = self.compute_minted_amount(mc_data)?;
+
+            if collation_data.value_flow.minted != CurrencyCollection::ZERO
+                && mc_data.config().get_minter_address().is_err()
+            {
+                tracing::warn!(
+                    "Collator ({}): minting of {:?} disabled: no minting smart contract defined",
+                    self.collator_descr,
+                    collation_data.value_flow.minted,
+                );
+                collation_data.value_flow.minted = CurrencyCollection::default();
+            }
+        } else {
+            collation_data.value_flow.created.tokens =
+                mc_data.config().get_block_creation_reward(false)?;
+            //TODO: should check if it is good to cast `prefix_len` from u16 to u8
+            collation_data.value_flow.created.tokens >>=
+                collation_data.block_id_short.shard.prefix_len() as u8;
+        }
+        // info: `prev_data.observable_accounts().root_extra().balance` is `prev_data.total_balance()` in old node
+        collation_data.value_flow.from_prev_block = prev_shard_data
+            .observable_accounts()
+            .root_extra()
+            .balance
+            .clone();
+        Ok(())
+    }
+
+    fn compute_minted_amount(&self, mc_data: &McData) -> Result<CurrencyCollection> {
+        //TODO: just copied from old node, needs review
+        tracing::trace!("Collator ({}): compute_minted_amount", self.collator_descr);
+
+        let mut to_mint = CurrencyCollection::default();
+
+        let to_mint_cp = match mc_data.config().get::<ConfigParam7>() {
+            Ok(Some(v)) => v,
+            _ => {
+                tracing::warn!(
+                    "Collator ({}): Can't get config param 7 (to_mint)",
+                    self.collator_descr,
+                );
+                return Ok(to_mint);
+            }
+        };
+
+        let old_global_balance = &mc_data.mc_state_extra().global_balance;
+        for item in to_mint_cp.as_dict().iter() {
+            let (key, amount) = item?;
+            let amount2 = old_global_balance
+                .other
+                .as_dict()
+                .get(key)?
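
A worked example of the logical-time alignment in `calc_start_lt`, using the `align = 1_000_000` that `McData::get_lt_align` returns in this patch:

```rust
let align: u64 = 1_000_000;
let start_lt: u64 = 12_345_678;

// `incr` is the distance to the next multiple of `align`
let incr = align - start_lt % align; // 654_322
// `incr < align` means start_lt is not yet aligned, so it gets bumped
assert!(incr < align);
assert_eq!(start_lt + incr, 13_000_000); // next multiple of 1_000_000

// an already aligned start_lt yields incr == align and is left unchanged
let aligned: u64 = 13_000_000;
assert_eq!(align - aligned % align, align);
```
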
+ .unwrap_or_default(); + if amount > amount2 { + let delta = amount.checked_sub(&amount2).ok_or_else(|| { + anyhow!( + "amount {:?} should sub amount2 {:?} without overflow", + amount, + amount2, + ) + })?; + tracing::debug!( + "{}: currency #{}: existing {:?}, required {:?}, to be minted {:?}", + self.collator_descr, + key, + amount2, + amount, + delta, + ); + if key != 0 { + to_mint.other.as_dict_mut().set(key, delta)?; + } + } + } + + Ok(to_mint) + } } diff --git a/collator/src/collator/execution_manager.rs b/collator/src/collator/execution_manager.rs new file mode 100644 index 000000000..ac6c4661e --- /dev/null +++ b/collator/src/collator/execution_manager.rs @@ -0,0 +1,67 @@ +use std::{ + collections::HashMap, + sync::{atomic::AtomicU64, Arc}, +}; + +use anyhow::Result; + +use everscale_types::{ + cell::HashBytes, + dict::Dict, + models::{BlockchainConfig, LibDescr}, +}; + +use super::super::types::{AccountId, AsyncMessage, ShardAccountStuff}; + +pub(super) struct ExecutionManager { + #[allow(clippy::type_complexity)] + pub changed_accounts: HashMap< + AccountId, + ( + tokio::sync::mpsc::Sender>, + tokio::task::JoinHandle>, + ), + >, + + // receive_tr: tokio::sync::mpsc::Receiver, Result)>>, + // wait_tr: Arc, Result)>>, + max_collate_threads: u16, + pub libraries: Dict, + + gen_utime: u32, + + // block's start logical time + start_lt: u64, + // actual maximum logical time + max_lt: Arc, + // this time is used if account's lt is smaller + min_lt: Arc, + // block random seed + seed_block: HashBytes, + + config: BlockchainConfig, +} + +impl ExecutionManager { + pub fn new( + gen_utime: u32, + start_lt: u64, + max_lt: u64, + seed_block: HashBytes, + libraries: Dict, + config: BlockchainConfig, + max_collate_threads: u16, + ) -> Self { + Self { + changed_accounts: HashMap::new(), + max_collate_threads, + libraries, + gen_utime, + start_lt, + max_lt: Arc::new(AtomicU64::new(max_lt)), + min_lt: Arc::new(AtomicU64::new(max_lt)), + seed_block, + config, + } + } +} diff --git a/collator/src/collator/mod.rs b/collator/src/collator/mod.rs index f37462b38..f900cf013 100644 --- a/collator/src/collator/mod.rs +++ b/collator/src/collator/mod.rs @@ -1,7 +1,6 @@ #[allow(clippy::module_inception)] mod collator; pub mod collator_processor; -mod do_collate; mod types; pub(crate) use collator::*; diff --git a/collator/src/collator/types.rs b/collator/src/collator/types.rs index 5da25dea8..78be39dd8 100644 --- a/collator/src/collator/types.rs +++ b/collator/src/collator/types.rs @@ -1,13 +1,20 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::{BTreeMap, HashMap}; -use anyhow::Result; +use anyhow::{anyhow, bail, Result}; use everscale_types::{ - cell::{Cell, UsageTree, UsageTreeMode}, - models::{BlockId, CurrencyCollection, McStateExtra, ShardAccounts, ShardIdent}, + cell::{Cell, HashBytes, UsageTree, UsageTreeMode}, + dict::{AugDict, Dict}, + models::{ + AccountBlock, AccountState, BlockId, BlockIdShort, BlockInfo, BlockRef, BlockchainConfig, + CurrencyCollection, ImportFees, InMsg, LibDescr, McStateExtra, OutMsg, OutMsgQueueInfo, + OwnedMessage, PrevBlockRef, ProcessedUpto, ShardAccount, ShardAccounts, ShardDescription, + ShardFees, ShardIdent, SimpleLib, ValueFlow, + }, }; use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; +use tycho_core::internal_queue::types::ext_types_stubs::EnqueuedMessage; use crate::mempool::MempoolAnchorId; @@ -96,15 +103,18 @@ pub(super) struct McData { mc_state_extra: McStateExtra, prev_key_block_seqno: u32, prev_key_block: Option, - 
state: Arc, + mc_state_stuff: ShardStateStuff, } impl McData { - pub fn new(mc_state: Arc) -> Result { - let mc_state_extra = mc_state.state_extra()?; + pub fn build(mc_state_stuff: ShardStateStuff) -> Result { + let mc_state_extra = mc_state_stuff.state_extra()?; // prev key block let (prev_key_block_seqno, prev_key_block) = if mc_state_extra.after_key_block { - (mc_state.block_id().seqno, Some(*mc_state.block_id())) + ( + mc_state_stuff.block_id().seqno, + Some(*mc_state_stuff.block_id()), + ) } else if let Some(block_ref) = mc_state_extra.last_key_block.as_ref() { ( block_ref.seqno, @@ -123,26 +133,53 @@ impl McData { mc_state_extra: mc_state_extra.clone(), prev_key_block, prev_key_block_seqno, - state: mc_state, + mc_state_stuff, }) } - pub fn state(&self) -> Arc { - self.state.clone() + pub fn prev_key_block_seqno(&self) -> u32 { + self.prev_key_block_seqno + } + + pub fn mc_state_stuff(&self) -> ShardStateStuff { + self.mc_state_stuff.clone() } pub fn mc_state_extra(&self) -> &McStateExtra { &self.mc_state_extra } + + pub fn get_master_ref(&self) -> BlockRef { + let end_lt = self.mc_state_stuff.state().gen_lt; + let block_id = self.mc_state_stuff.block_id(); + BlockRef { + end_lt, + seqno: block_id.seqno, + root_hash: block_id.root_hash.clone(), + file_hash: block_id.file_hash.clone(), + } + } + + pub fn config(&self) -> &BlockchainConfig { + &self.mc_state_extra.config + } + + pub fn libraries(&self) -> &Dict { + &self.mc_state_stuff.state().libraries + } + + pub fn get_lt_align(&self) -> u64 { + 1000000 + } } pub(super) struct PrevData { - observable_states: Vec>, + observable_states: Vec, observable_accounts: ShardAccounts, blocks_ids: Vec, - pure_states: Vec>, + pure_states: Vec, pure_state_root: Cell, gen_utime: u32, @@ -151,30 +188,27 @@ pub(super) struct PrevData { overload_history: u64, underload_history: u64, - externals_processed_upto: BTreeMap, + externals_processed_upto: BTreeMap, } impl PrevData { - pub fn build( - _mc_data: &McData, - prev_states: &Vec>, - prev_blocks_ids: Vec, - ) -> Result<(Self, UsageTree)> { + pub fn build(prev_states: Vec) -> Result<(Self, UsageTree)> { //TODO: make real implementation - // refer to the old node impl: + // consider split/merge logic // Collator::prepare_data() // Collator::unpack_last_state() + let prev_blocks_ids: Vec<_> = prev_states.iter().map(|s| *s.block_id()).collect(); let pure_prev_state_root = prev_states[0].root_cell(); let pure_prev_states = prev_states.clone(); let usage_tree = UsageTree::new(UsageTreeMode::OnDataAccess); let observable_root = usage_tree.track(pure_prev_state_root); let tracker = MinRefMcStateTracker::new(); - let observable_states = vec![Arc::new(ShardStateStuff::new( + let observable_states = vec![ShardStateStuff::new( *pure_prev_states[0].block_id(), observable_root, &tracker, - )?)]; + )?]; let gen_utime = observable_states[0].state().gen_utime; let gen_lt = observable_states[0].state().gen_lt; @@ -182,6 +216,12 @@ impl PrevData { let total_validator_fees = observable_states[0].state().total_validator_fees.clone(); let overload_history = observable_states[0].state().overload_history; let underload_history = observable_states[0].state().underload_history; + let iter = pure_prev_states[0] + .state() + .externals_processed_upto + .iter() + .filter_map(|kv| kv.ok()); + let externals_processed_upto = BTreeMap::from_iter(iter); let prev_data = Self { observable_states, @@ -198,7 +238,7 @@ impl PrevData { overload_history, underload_history, - externals_processed_upto: BTreeMap::new(), + 
externals_processed_upto, }; Ok((prev_data, usage_tree)) @@ -212,19 +252,258 @@ impl PrevData { Ok(()) } + pub fn observable_states(&self) -> &Vec { + &self.observable_states + } + + pub fn observable_accounts(&self) -> &ShardAccounts { + &self.observable_accounts + } + pub fn blocks_ids(&self) -> &Vec { &self.blocks_ids } - pub fn pure_states(&self) -> &Vec> { + pub fn get_blocks_ref(&self) -> Result<PrevBlockRef> { + if self.pure_states.len() < 1 || self.pure_states.len() > 2 { + bail!( + "There should be 1 or 2 prev states. Actual count is {}", + self.pure_states.len() + ) + } + + let mut block_refs = vec![]; + for state in self.pure_states.iter() { + block_refs.push(BlockRef { + end_lt: state.state().gen_lt, + seqno: state.block_id().seqno, + root_hash: state.block_id().root_hash.clone(), + file_hash: state.block_id().file_hash.clone(), + }); + } + + let prev_ref = if block_refs.len() == 2 { + PrevBlockRef::AfterMerge { + left: block_refs.remove(0), + right: block_refs.remove(0), + } + } else { + PrevBlockRef::Single(block_refs.remove(0)) + }; + + Ok(prev_ref) + } + + pub fn pure_states(&self) -> &Vec { + &self.pure_states } - pub fn externals_processed_upto(&self) -> &BTreeMap { + pub fn pure_state_root(&self) -> &Cell { + &self.pure_state_root + } + + pub fn gen_lt(&self) -> u64 { + self.gen_lt + } + + pub fn total_validator_fees(&self) -> &CurrencyCollection { + &self.total_validator_fees + } + + pub fn externals_processed_upto(&self) -> &BTreeMap { + &self.externals_processed_upto + } } +#[derive(Debug, Default)] pub(super) struct BlockCollationData { - block_descr: Arc, + //block_descr: Arc, + pub block_id_short: BlockIdShort, + pub chain_time: u32, + + pub start_lt: u64, + // Should be updated on each tx finalization from ExecutionManager.max_lt + // which is updated during tx execution + pub max_lt: u64, + + pub in_msgs: InMsgDescr, + pub out_msgs: OutMsgDescr, + + // should read from prev_shard_state + pub out_msg_queue_stuff: OutMsgQueueInfoStuff, + /// Index of the highest external processed from the anchor: (anchor, index) + pub externals_processed_upto: Dict, + + /// Ids of top blocks from shards that will be included in the master block + pub top_shard_blocks_ids: Vec, + + shards: Option>>, + shards_max_end_lt: u64, + + //TODO: setup update logic when ShardFees is implemented + pub shard_fees: ShardFees, + + pub mint_msg: Option, + pub recover_create_msg: Option, + + pub value_flow: ValueFlow, + + min_ref_mc_seqno: Option, + + pub rand_seed: HashBytes, +} + +impl BlockCollationData { + pub fn shards(&self) -> Result<&HashMap>> { + self.shards + .as_ref() + .ok_or_else(|| anyhow!("`shards` is not initialized yet")) + } + pub fn set_shards(&mut self, shards: HashMap>) { + self.shards = Some(shards); + } + pub fn shards_mut(&mut self) -> Result<&mut HashMap>> { + self.shards + .as_mut() + .ok_or_else(|| anyhow!("`shards` is not initialized yet")) + } + + pub fn shards_max_end_lt(&self) -> u64 { + self.shards_max_end_lt + } + pub fn update_shards_max_end_lt(&mut self, val: u64) { + if val > self.shards_max_end_lt { + self.shards_max_end_lt = val; + } + } + + pub fn update_ref_min_mc_seqno(&mut self, mc_seqno: u32) -> u32 { + let min_ref_mc_seqno = + std::cmp::min(self.min_ref_mc_seqno.unwrap_or(std::u32::MAX), mc_seqno); + self.min_ref_mc_seqno = Some(min_ref_mc_seqno); + min_ref_mc_seqno + } + + pub fn min_ref_mc_seqno(&self) -> Result<u32> { + self.min_ref_mc_seqno + .ok_or_else(|| anyhow!("`min_ref_mc_seqno` is not initialized yet")) + } +} + +pub(super) type AccountId = HashBytes; + +
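The `update_ref_min_mc_seqno` helper above folds every referenced masterchain seqno into a running minimum that starts out unset; a condensed standalone sketch of that folding (illustrative names, not part of the diff):

fn fold_min_ref_mc_seqno(current: Option<u32>, referenced: u32) -> u32 {
    // an unset minimum behaves like u32::MAX, so the first fold just takes `referenced`
    current.map_or(referenced, |min| min.min(referenced))
}

fn main() {
    let mut min_ref: Option<u32> = None;
    for referenced in [115u32, 112, 118] {
        min_ref = Some(fold_min_ref_mc_seqno(min_ref, referenced));
    }
    assert_eq!(min_ref, Some(112)); // the smallest referenced mc seqno wins
}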
pub(super) type InMsgDescr = AugDict; +pub(super) type OutMsgDescr = AugDict; + +pub(super) type AccountBlocksDict = AugDict; + +pub(super) struct ShardAccountStuff { + pub account_addr: AccountId, + pub shard_account: ShardAccount, + pub orig_libs: Dict, } + +impl ShardAccountStuff { + // pub fn update_shard_state(&mut self, shard_accounts: &mut ShardAccounts) -> Result { + // let account = self.shard_account.load_account()?; + // if account.is_none() { + // new_accounts.remove(self.account_addr().clone())?; + // } else { + // let shard_acc = ShardAccount::with_account_root(self.account_root(), self.last_trans_hash.clone(), self.last_trans_lt); + // let value = shard_acc.write_to_new_cell()?; + // new_accounts.set_builder_serialized(self.account_addr().clone(), &value, &account.aug()?)?; + // } + // AccountBlock::with_params(&self.account_addr, &self.transactions, &self.state_update) + // } + + pub fn update_public_libraries(&self, libraries: &mut Dict) -> Result<()> { + let opt_account = self.shard_account.account.load()?; + let state_init = match opt_account.state() { + Some(AccountState::Active(ref state_init)) => Some(state_init), + _ => None, + }; + let new_libs = state_init.map(|v| v.libraries.clone()).unwrap_or_default(); + if new_libs.root() != self.orig_libs.root() { + //TODO: implement when scan_diff is added + //STUB: just do nothing, no accounts, no libraries updates in prototype + // new_libs.scan_diff(&self.orig_libs, |key: UInt256, old, new| { + // let old = old.unwrap_or_default(); + // let new = new.unwrap_or_default(); + // if old.is_public_library() && !new.is_public_library() { + // self.remove_public_library(key, libraries)?; + // } else if !old.is_public_library() && new.is_public_library() { + // self.add_public_library(key, new.root, libraries)?; + // } + // Ok(true) + // })?; + } + Ok(()) + } +} + +#[derive(Debug, Default)] +pub(super) struct OutMsgQueueInfoStuff { + /// Dict (shard, seq_no): processed up to info + pub proc_info: Dict<(u64, u32), ProcessedUpto>, +} + +impl OutMsgQueueInfoStuff { + ///TODO: make real implementation + pub fn get_out_msg_queue_info(&self) -> (OutMsgQueueInfo, u32) { + let mut min_ref_mc_seqno = u32::MAX; + //STUB: just clone existing + let msg_queue_info = OutMsgQueueInfo { + proc_info: self.proc_info.clone(), + }; + (msg_queue_info, min_ref_mc_seqno) + } +} + +pub trait ShardDescriptionExt { + fn from_block_info( + block_id: BlockId, + block_info: &BlockInfo, + value_flow: &ValueFlow, + ) -> ShardDescription; +} +impl ShardDescriptionExt for ShardDescription { + fn from_block_info( + block_id: BlockId, + block_info: &BlockInfo, + value_flow: &ValueFlow, + ) -> ShardDescription { + ShardDescription { + seqno: block_id.seqno, + reg_mc_seqno: 0, + start_lt: block_info.start_lt, + end_lt: block_info.end_lt, + root_hash: block_id.root_hash, + file_hash: block_id.file_hash, + before_split: block_info.before_split, + before_merge: false, //TODO: by t-node, needs review + want_split: block_info.want_split, + want_merge: block_info.want_merge, + nx_cc_updated: false, //TODO: by t-node, needs review + next_catchain_seqno: block_info.gen_catchain_seqno, + next_validator_shard: block_info.shard.prefix(), // eq to `shard_prefix_with_tag` in old node + min_ref_mc_seqno: block_info.min_ref_mc_seqno, + gen_utime: block_info.gen_utime, + split_merge_at: None, //TODO: check if we really should not use it here + fees_collected: value_flow.fees_collected.clone(), + funds_created: value_flow.created.clone(), + copyleft_rewards: Default::default(), + 
proof_chain: None, + #[cfg(feature = "venom")] + collators: None, + } + } +} + +pub(super) enum AsyncMessage { + /// 0 - message; 1 - message.id_hash() + Ext(OwnedMessage, HashBytes), + /// 0 - message in execution queue; 1 - TRUE when from the same shard + Int(EnqueuedMessage, bool), +} + +pub mod ext_types_stubs {} diff --git a/collator/src/lib.rs b/collator/src/lib.rs index 3669af395..baa4b0a5c 100644 --- a/collator/src/lib.rs +++ b/collator/src/lib.rs @@ -7,6 +7,6 @@ pub mod test_utils; mod tracing_targets; pub mod types; mod utils; -mod validator; +pub mod validator; pub use validator::test_impl as validator_test_impl; diff --git a/collator/src/manager/collation_manager.rs b/collator/src/manager/collation_manager.rs index 1857ab867..e90259a4a 100644 --- a/collator/src/manager/collation_manager.rs +++ b/collator/src/manager/collation_manager.rs @@ -4,8 +4,7 @@ use anyhow::Result; use async_trait::async_trait; use everscale_types::models::{BlockId, ShardIdent}; - -use tycho_core::internal_queue::iterator::QueueIteratorImpl; +use tycho_block_util::state::ShardStateStuff; use crate::{ collator::{ @@ -51,6 +50,8 @@ where state_adapter_builder: impl StateNodeAdapterBuilder + Send, node_network: NodeNetwork, ) -> Self; + + fn get_state_node_adapter(&self) -> Arc; } /// Generic implementation of [`CollationManager`] @@ -65,6 +66,7 @@ where config: Arc, dispatcher: Arc, ()>>, + state_node_adapter: Arc, } #[allow(private_bounds)] @@ -79,7 +81,7 @@ where ST: StateNodeAdapter, { CollationManagerGenImpl::< - CollatorStdImpl, _, _, _>, + CollatorStdImpl, _, _, _>, ValidatorStdImpl, _>, MessageQueueAdapterStdImpl, MP, @@ -104,7 +106,7 @@ where V: ValidatorProcessor, { CollationManagerGenImpl::< - CollatorStdImpl, _, _, _>, + CollatorStdImpl, _, _, _>, ValidatorStdImpl, MessageQueueAdapterStdImpl, MP, @@ -149,11 +151,11 @@ where let state_node_adapter = Arc::new(state_node_adapter); // create validator and start its tasks queue - let validator = Arc::new(Validator::create( + let validator = Validator::create( dispatcher.clone(), state_node_adapter.clone(), node_network.into(), - )); + ); // create collation processor that will use these adapters // and run dispatcher for its own tasks queue @@ -171,6 +173,7 @@ where let mgr = Self { config, dispatcher: dispatcher.clone(), + state_node_adapter, }; // start other async processes @@ -195,6 +198,10 @@ where // return manager mgr } + + fn get_state_node_adapter(&self) -> Arc { + self.state_node_adapter.clone() + } } #[async_trait] @@ -226,12 +233,27 @@ where MP: MempoolAdapter, ST: StateNodeAdapter, { - async fn on_mc_block(&self, mc_block_id: BlockId) -> Result<()> { - self.enqueue_task(method_to_async_task_closure!( - process_mc_block_from_bc, - mc_block_id - )) - .await + async fn on_block_accepted(&self, block_id: &BlockId) -> Result<()> { + //TODO: remove accepted block from cache + //STUB: do nothing, currently we remove block from cache when it is sent to the state node + Ok(()) + } + + async fn on_block_accepted_external(&self, state: &ShardStateStuff) -> Result<()> { + //TODO: should store block info from blockchain if it was not already collated + // and validated by ourselves. 
Will use this info for faster validation later: + // will consider that just collated block is already validated if it has the + // same root hash and file hash + if state.block_id().is_masterchain() { + let mc_block_id = *state.block_id(); + self.enqueue_task(method_to_async_task_closure!( + process_mc_block_from_bc, + mc_block_id + )) + .await + } else { + Ok(()) + } } } diff --git a/collator/src/manager/collation_processor.rs b/collator/src/manager/collation_processor.rs index 6d6233732..ee025ad89 100644 --- a/collator/src/manager/collation_processor.rs +++ b/collator/src/manager/collation_processor.rs @@ -3,11 +3,9 @@ use std::{ sync::Arc, }; -use everscale_crypto::ed25519::KeyPair; - use anyhow::{anyhow, bail, Result}; -use everscale_types::models::{BlockId, ShardIdent, ValidatorDescription, ValidatorSet}; +use everscale_types::models::{BlockId, BlockInfo, ShardIdent, ValueFlow}; use tycho_block_util::{ block::ValidatorSubsetInfo, state::{MinRefMcStateTracker, ShardStateStuff}, @@ -31,7 +29,7 @@ use crate::{ use super::{ types::{ BlockCacheKey, BlockCandidateContainer, BlockCandidateToSend, BlocksCache, - McBlockSubgraphToSend, SendSyncStatus, ShardStateStuffExt, + McBlockSubgraphToSend, SendSyncStatus, }, utils::{build_block_stuff_for_sync, find_us_in_collators_set}, }; @@ -51,8 +49,7 @@ where state_node_adapter: Arc, mq_adapter: Arc, - //TODO: possibly use V because manager may not need a ref to validator - validator: Arc, + validator: V, active_collation_sessions: HashMap>, collation_sessions_to_finish: HashMap>, @@ -64,6 +61,8 @@ where blocks_cache: BlocksCache, last_processed_mc_block_id: Option, + /// id of last master block collated by ourselves + last_collated_mc_block_id: Option, /// chain time of last collated master block or received from bc last_mc_block_chain_time: u64, /// chain time for next master block to be collated @@ -83,7 +82,7 @@ where dispatcher: Arc>, mpool_adapter: Arc, state_node_adapter: Arc, - validator: Arc, + validator: V, ) -> Self { Self { config, @@ -101,6 +100,7 @@ where blocks_cache: BlocksCache::default(), last_processed_mc_block_id: None, + last_collated_mc_block_id: None, last_mc_block_chain_time: 0, next_mc_block_chain_time: 0, } @@ -124,11 +124,17 @@ where fn last_processed_mc_block_id(&self) -> Option<&BlockId> { self.last_processed_mc_block_id.as_ref() } - fn set_last_processed_mc_block_id(&mut self, block_id: BlockId) { self.last_processed_mc_block_id = Some(block_id); } + fn last_collated_mc_block_id(&self) -> Option<&BlockId> { + self.last_collated_mc_block_id.as_ref() + } + fn set_last_collated_mc_block_id(&mut self, block_id: BlockId) { + self.last_collated_mc_block_id = Some(block_id); + } + /// (TODO) Check sync status between mempool and blockchain state /// and pause collation when we are far behind other nodes, /// just sync blocks from blockchain @@ -147,49 +153,127 @@ pub async fn process_mc_block_from_bc(&self, mc_block_id: BlockId) -> Result<()> { // check if we should skip this master block from the blockchain // because it is not far ahead of last collated by ourselves - if !self.should_process_mc_block_from_bc(&mc_block_id) { + if !self.check_should_process_mc_block_from_bc(&mc_block_id) { return Ok(()); } // request mc state for this master block - let receiver = self.state_node_adapter.request_state(mc_block_id).await?; + //TODO: should await state and schedule processing in async task + let mc_state = self.state_node_adapter.load_state(&mc_block_id).await?; // when the state is received, execute master block processing routines
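The `check_should_process_mc_block_from_bc` gating that follows boils down to a seqno-delta comparison against our own last master block. A simplified, self-contained sketch of that core decision (the real code below also distinguishes the last collated block from the last processed one and currently short-circuits with a STUB; names and values here are illustrative):

fn should_process(incoming_seqno: u32, own_seqno: Option<u32>, max_delta: i32) -> bool {
    match own_seqno {
        // nothing collated or processed yet: bootstrap collation from the blockchain
        None => true,
        Some(own) => {
            let delta = incoming_seqno as i32 - own as i32;
            // behind or equal (delta <= 0), or only slightly ahead: keep waiting for our own
            delta > max_delta
        }
    }
}

fn main() {
    assert!(!should_process(101, Some(100), 2)); // close ahead: wait for own collation
    assert!(should_process(105, Some(100), 2));  // bc is far ahead: process it
    assert!(should_process(5, None, 2));         // first mc block seen: process it
}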
let mpool_adapter = self.mpool_adapter.clone(); let dispatcher = self.dispatcher.clone(); - receiver - .process_on_recv(|mc_state| async move { + + tracing::info!( + target: tracing_targets::COLLATION_MANAGER, + "Processing requested mc state for block ({})...", + mc_state.block_id().as_short_id() + ); + Self::notify_mempool_about_mc_block(mpool_adapter, mc_state.clone()).await?; + + dispatcher + .enqueue_task(method_to_async_task_closure!( + refresh_collation_sessions, + mc_state + )) + .await?; + + Ok(()) + } + + /// 1. Skip if it is equal to or not far ahead of the last collated by ourselves + /// 2. Skip if it was already processed before + /// 3. Skip if waiting for the first own master block collation less than `max_mc_block_delta_from_bc_to_await_own` + fn check_should_process_mc_block_from_bc(&self, mc_block_id: &BlockId) -> bool { + let last_collated_mc_block_id_opt = self.last_collated_mc_block_id(); + let last_processed_mc_block_id_opt = self.last_processed_mc_block_id(); + if last_collated_mc_block_id_opt.is_some() { + // when we have last own collated master block then skip if incoming one is equal + // or not far ahead from last own collated + // then will wait for next own collated master block + let (seqno_delta, is_equal) = + Self::compare_mc_block_with(mc_block_id, self.last_collated_mc_block_id()); + if is_equal || seqno_delta <= self.config.max_mc_block_delta_from_bc_to_await_own { tracing::info!( target: tracing_targets::COLLATION_MANAGER, - "Processing requested mc state for block ({})...", - mc_state.block_id().as_short_id() + r#"Should NOT process mc block ({}) from bc: should wait for next own collated: + is_equal = {}, seqno_delta = {}, max_mc_block_delta_from_bc_to_await_own = {}"#, + mc_block_id.as_short_id(), is_equal, seqno_delta, + self.config.max_mc_block_delta_from_bc_to_await_own, ); - Self::notify_mempool_about_mc_block(mpool_adapter, mc_state.clone()).await?; - dispatcher - .enqueue_task(method_to_async_task_closure!( - refresh_collation_sessions, - mc_state - )) - .await - }) - .await; + return false; + } else if !is_equal { + //STUB: skip processing master block from bc even if it is far away from own last collated + // because the logic for updating collators in this case is not implemented yet + tracing::info!( + target: tracing_targets::COLLATION_MANAGER, + "STUB: skip processing mc block ({}) from bc anyway if we are collating by ourselves", + mc_block_id.as_short_id(), + ); + return false; + } + } else { + // When we do not have last own collated master block then check last processed master block + // If None then we should process incoming master block anyway to init collation process + // If we have already processed some previous incoming master block and collations were started + // then we should wait for the first own collated master block + // but not more than `max_mc_block_delta_from_bc_to_await_own` + if last_processed_mc_block_id_opt.is_some() { + let (seqno_delta, is_equal) = + Self::compare_mc_block_with(mc_block_id, last_processed_mc_block_id_opt); + let already_processed_before = is_equal || seqno_delta < 0; + if already_processed_before { + tracing::info!( + target: tracing_targets::COLLATION_MANAGER, + "Should NOT process mc block ({}) from bc: it was already processed before", + mc_block_id.as_short_id(), + ); - Ok(()) + return false; + } + let should_wait_for_next_own_collated = seqno_delta + <= self.config.max_mc_block_delta_from_bc_to_await_own + && self.active_collators.contains_key(&ShardIdent::MASTERCHAIN); + if 
should_wait_for_next_own_collated { + tracing::info!( + target: tracing_targets::COLLATION_MANAGER, + r#"Should NOT process mc block ({}) from bc: should wait for first own collated: + seqno_delta = {}, max_mc_block_delta_from_bc_to_await_own = {}"#, + mc_block_id.as_short_id(), seqno_delta, + self.config.max_mc_block_delta_from_bc_to_await_own, + ); + return false; + } + } + } + true } - /// 1. Skip if it was already processed before - /// 2. (TODO) Skip if it is not far ahead of last collated by ourselves - fn should_process_mc_block_from_bc(&self, mc_block_id: &BlockId) -> bool { - let is_not_ahead = self.check_if_mc_block_not_ahead_last_processed(mc_block_id); - if is_not_ahead { + /// Returns: (seqno delta from other, true - if equal) + fn compare_mc_block_with( + mc_block_id: &BlockId, + other_mc_block_id_opt: Option<&BlockId>, + ) -> (i32, bool) { + //TODO: consider block shard? + let (seqno_delta, is_equal) = match other_mc_block_id_opt { + None => (0, false), + Some(other_mc_block_id) => ( + mc_block_id.seqno as i32 - other_mc_block_id.seqno as i32, + mc_block_id == other_mc_block_id, + ), + }; + if seqno_delta < 0 || is_equal { tracing::info!( target: tracing_targets::COLLATION_MANAGER, - "Should NOT process mc block ({}) from bc", + "mc block ({}) is NOT AHEAD of other ({:?}): is_equal = {}, seqno_delta = {}", mc_block_id.as_short_id(), + other_mc_block_id_opt.map(|b| b.as_short_id()), + is_equal, seqno_delta, ); } - !is_not_ahead + (seqno_delta, is_equal) } /// * TRUE - provided `mc_block_id` is before or equal to last processed @@ -232,7 +316,7 @@ where ); let last_mc_block_id = self .state_node_adapter - .get_last_applied_mc_block_id() + .load_last_applied_mc_block_id() .await?; tracing::info!( target: tracing_targets::COLLATION_MANAGER, @@ -246,10 +330,7 @@ where /// Get shards info from the master state, /// then start missing sessions for these shards, or refresh existing. /// For each shard run collation process if current node is included in collators subset. - pub async fn refresh_collation_sessions( - &mut self, - mc_state: Arc, - ) -> Result<()> { + pub async fn refresh_collation_sessions(&mut self, mc_state: ShardStateStuff) -> Result<()> { tracing::debug!( target: tracing_targets::COLLATION_MANAGER, "Trying to refresh collation sessions by mc state for block ({})...", @@ -273,8 +354,11 @@ where // 2. 
Skip refreshing sessions if this master block has already been processed // do not re-process this master block if it is lower than last processed or equal to it + // but process a new version of block with the same seqno let processing_mc_block_id = *mc_state.block_id(); - if self.check_if_mc_block_not_ahead_last_processed(&processing_mc_block_id) { + let (seqno_delta, is_equal) = + Self::compare_mc_block_with(&processing_mc_block_id, self.last_processed_mc_block_id()); + if seqno_delta < 0 || is_equal { return Ok(()); } @@ -319,15 +403,8 @@ where let new_session_seqno = mc_state_extra.validator_info.catchain_seqno; // we need full validators set to define the subset for each session and to check if current node should collate - //let full_validators_set = mc_state.config_params()?.get_current_validator_set()?; -//STUB: return dummy validator set - let full_validators_set = ValidatorSet { - utime_since: 0, - utime_until: 0, - main: std::num::NonZeroU16::MIN, - total_weight: 0, - list: vec![], - }; + let full_validators_set = mc_state.config_params()?.get_current_validator_set()?; + tracing::trace!(target: tracing_targets::COLLATION_MANAGER, "full_validators_set {:?}", full_validators_set); // compare with active sessions and detect new sessions to start and outdated sessions to finish let mut sessions_to_keep = HashMap::new(); @@ -373,14 +450,40 @@ where sessions_to_start.iter().map(|(k, _)| k).collect::>(), ); - // store existing sessions that we should keep - self.active_collation_sessions = sessions_to_keep; + let cc_config = mc_state_extra.config.get_catchain_config()?; + + // update master state in the collators of the existing sessions + for (shard_id, session_info) in sessions_to_keep { + self.active_collation_sessions + .insert(shard_id, session_info); + + // skip collator of masterchain because its working state is already updated + // after master block collation + if shard_id.is_masterchain() { + continue; + } + + // if there is no active collator then current node does not collate this shard + // so we do not need to do anything + let Some(collator) = self.active_collators.get(&shard_id) else { + continue; + }; + + tracing::info!( + target: tracing_targets::COLLATION_MANAGER, + "Updating McData in active collator for shard {} and resuming collation in it...", + shard_id, + ); + + collator + .equeue_update_mc_data_and_resume_shard_collation(mc_state.clone()) + .await?; + } // we may have sessions to finish, collators to stop, and sessions to start // additionally we may have some active collators // for each new session we should check if current node should collate, // then stop collators if should not, otherwise start missing collators - let cc_config = mc_state_extra.config.get_catchain_config()?; for (shard_id, prev_blocks_ids) in sessions_to_start { let (subset, hash_short) = full_validators_set .compute_subset(shard_id, &cc_config, new_session_seqno) .ok_or(anyhow!( new_session_seqno, ))?; - //STUB: create subset with only us - let subset = vec![ValidatorDescription { - public_key: self.config.key_pair.public_key.to_bytes().into(), - adnl_addr: Some(self.config.key_pair.public_key.to_bytes().into()), - weight: 90, - prev_total_weight: 90, - mc_seqno_since: 0, - }]; + //TEST: override with test subset with test keypairs defined on test run + #[cfg(feature = "test")] + let subset = if self.config.test_validators_keypairs.is_empty() { + subset + } else { + let mut test_subset = vec![]; + for (i, keypair) in self.config.test_validators_keypairs.iter().enumerate() { + 
let val_descr = &subset[i]; + test_subset.push(ValidatorDescription { + public_key: keypair.public_key.to_bytes().into(), + adnl_addr: val_descr.adnl_addr, + weight: val_descr.weight, + mc_seqno_since: val_descr.mc_seqno_since, + prev_total_weight: val_descr.prev_total_weight, + }); + } + test_subset + }; + #[cfg(feature = "test")] + tracing::warn!( + target: tracing_targets::COLLATION_MANAGER, + "FOR TEST: overridden subset of validators to collate shard {}: {:?}", + shard_id, + subset, + ); let local_pubkey_opt = find_us_in_collators_set(&self.config, &subset); @@ -418,6 +538,8 @@ where shard_id, ); let collator = C::start( + self.config.clone(), + new_session_info.clone(), self.dispatcher.clone(), self.mq_adapter.clone(), self.mpool_adapter.clone(), @@ -425,6 +547,7 @@ where shard_id, prev_blocks_ids, mc_state.clone(), + self.state_tracker.clone(), ) .await; entry.insert(Arc::new(collator)); @@ -434,8 +557,15 @@ where self.validator .enqueue_add_session(Arc::new(new_session_info.clone().try_into()?)) .await?; - } else if let Some(collator) = self.active_collators.remove(&shard_id) { - to_stop_collators.insert((shard_id, new_session_seqno), collator); + } else { + tracing::info!( + target: tracing_targets::COLLATION_MANAGER, + "Node was not authorized to collate shard {}", + shard_id, + ); + if let Some(collator) = self.active_collators.remove(&shard_id) { + to_stop_collators.insert((shard_id, new_session_seqno), collator); + } } //TODO: possibly do not need to store collation sessions if we do not collate in them @@ -535,6 +665,8 @@ where candidate_id.as_short_id(), candidate_chain_time, ); + let new_state_stuff = collation_result.new_state_stuff; + let new_mc_state = new_state_stuff.clone(); self.store_candidate(collation_result.candidate)?; // send validation task to validator @@ -571,6 +703,11 @@ where { self.enqueue_mc_block_collation(next_mc_block_chain_time, Some(candidate_id)) .await?; + } else { + // if we do not need to collate a master block then we can continue to collate shard blocks + // otherwise next shard block will be scheduled after master block collation + self.enqueue_try_collate_next_shard_block(&candidate_id.shard) + .await?; + } } else { // store last master block chain time @@ -592,11 +729,7 @@ where candidate_chain_time, ); - let new_mc_state = ShardStateStuff::from_state( - candidate_id, - collation_result.new_state, - &self.state_tracker, - )?; + self.set_last_collated_mc_block_id(candidate_id); Self::notify_mempool_about_mc_block(self.mpool_adapter.clone(), new_mc_state.clone()) .await?; @@ -615,7 +748,7 @@ where /// Send master state related to master block to mempool (it may perform gc or nodes rotation) async fn notify_mempool_about_mc_block( mpool_adapter: Arc, - mc_state: Arc, + mc_state: ShardStateStuff, ) -> Result<()> { //TODO: in current implementation CollationProcessor should not notify mempool // about one master block more than once, but better to handle repeated request here or at mempool @@ -626,6 +759,7 @@ where /// 1. Store last collated chain time from anchor and check if master block interval elapsed in each shard /// 2. If true, schedule master block collation + /// 3. 
If no, schedule next shard block collation attempt pub async fn process_empty_skipped_anchor( &mut self, shard_id: ShardIdent, @@ -645,6 +779,10 @@ { self.enqueue_mc_block_collation(next_mc_block_chain_time, None) .await?; + } else { + // if we do not need to collate a master block then run the next attempt to collate a shard block + // otherwise next shard block will be scheduled after master block collation + self.enqueue_try_collate_next_shard_block(&shard_id).await?; + } Ok(()) } @@ -694,25 +832,28 @@ where } /// Find top shard blocks in cache for the next master block collation - fn detect_top_shard_blocks_ids_for_mc_block( + fn detect_top_shard_blocks_info_for_mc_block( &self, _next_mc_block_chain_time: u64, _trigger_shard_block_id: Option, - ) -> Vec { + ) -> Result> { //TODO: make real implementation (see comments in `enqueue_mc_block_collation`) //STUB: when we work with only one shard we can just get the last shard block // because collator manager will try to run master block collation // before processing any next candidate from the shard collator // because of dispatcher tasks queue - let res = self + let mut res = vec![]; + for (_, v) in self .blocks_cache .shards .iter() - .filter_map(|(_, shard_cache)| shard_cache.last_key_value().map(|(_, v)| *v.block_id())) - .collect::>(); - - res + .filter_map(|(_, shard_cache)| shard_cache.last_key_value()) + { + let block = v.get_block()?; + res.push((*v.block_id(), block.load_info()?, block.load_value_flow()?)); + } + Ok(res) } /// (TODO) Enqueue master block collation task. Will determine top shard blocks for this collation @@ -734,23 +875,23 @@ where // Or the first of the previous ones (An-x) that includes externals for that shard (ShB) // if all next including required one ([An-x+1, An]) do not contain externals for shard (ShB). - let top_shard_blocks_ids = self.detect_top_shard_blocks_ids_for_mc_block( + let top_shard_blocks_info = self.detect_top_shard_blocks_info_for_mc_block( next_mc_block_chain_time, trigger_shard_block_id, - ); + )?; //TODO: We should somehow collect externals for masterchain during the shard blocks collation // or pull them directly when collating master self.set_next_mc_block_chain_time(next_mc_block_chain_time); - let _tracing_top_shard_blocks_descr = top_shard_blocks_ids + let _tracing_top_shard_blocks_descr = top_shard_blocks_info .iter() - .map(|id| id.as_short_id().to_string()) + .map(|(id, _, _)| id.as_short_id().to_string()) .collect::>(); mc_collator - .equeue_do_collate(next_mc_block_chain_time, top_shard_blocks_ids) + .equeue_do_collate(next_mc_block_chain_time, top_shard_blocks_info) .await?; tracing::info!( @@ -763,6 +904,28 @@ where Ok(()) } + async fn enqueue_try_collate_next_shard_block(&self, shard_id: &ShardIdent) -> Result<()> { + // get shardchain collator if exists + let Some(collator) = self.active_collators.get(shard_id).cloned() else { + tracing::warn!( + target: tracing_targets::COLLATION_MANAGER, + "Node does not collate blocks for shard {}", + shard_id, + ); + return Ok(()); + }; + + collator.equeue_try_collate().await?; + + tracing::debug!( + target: tracing_targets::COLLATION_MANAGER, + "Enqueued next attempt to collate shard block for {}", + shard_id, + ); + + Ok(()) + } + /// Process validated block /// 1. Process invalid block (currently, just panic) /// 2. 
Update block in cache with validation info @@ -1084,7 +1247,7 @@ where } }); //TODO: make proper panic and error processing without waiting for spawned task - let _ = join_handle.await?; + join_handle.await??; } else { tracing::debug!( target: tracing_targets::COLLATION_MANAGER, diff --git a/collator/src/manager/types.rs b/collator/src/manager/types.rs index 4f252b3ba..ad3d8ea42 100644 --- a/collator/src/manager/types.rs +++ b/collator/src/manager/types.rs @@ -1,16 +1,13 @@ -use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap}; use anyhow::{anyhow, bail, Result}; use everscale_types::{ - cell::{Cell, CellBuilder, CellFamily, HashBytes, Store}, - models::{BlockId, BlockIdShort, ShardIdent, ShardStateUnsplit, Signature}, + cell::HashBytes, + models::{Block, BlockId, BlockIdShort, ShardIdent, Signature}, }; -use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; +use tycho_util::FastHashMap; use crate::types::BlockCandidate; @@ -25,7 +22,7 @@ pub(super) struct BlocksCache { pub struct BlockCandidateEntry { pub key: BlockCacheKey, pub candidate: BlockCandidate, - pub signatures: HashMap, + pub signatures: FastHashMap, } pub enum SendSyncStatus { @@ -110,7 +107,7 @@ impl BlockCandidateContainer { &mut self, is_valid: bool, already_synced: bool, - signatures: HashMap, + signatures: FastHashMap, ) { if let Some(ref mut entry) = self.entry { entry.signatures = signatures; @@ -172,6 +169,14 @@ impl BlockCandidateContainer { } Ok(()) } + + pub fn get_block(&self) -> Result<&Block> { + let entry = self + .entry + .as_ref() + .ok_or_else(|| anyhow!("`entry` was extracted"))?; + Ok(entry.candidate.block()) + } } pub struct BlockCandidateToSend { @@ -183,30 +188,3 @@ pub struct McBlockSubgraphToSend { pub mc_block: BlockCandidateToSend, pub shard_blocks: Vec, } - -pub(in crate::manager) trait ShardStateStuffExt { - fn from_state( - block_id: BlockId, - shard_state: ShardStateUnsplit, - tracker: &MinRefMcStateTracker, - ) -> Result>; -} -impl ShardStateStuffExt for ShardStateStuff { - fn from_state( - block_id: BlockId, - shard_state: ShardStateUnsplit, - tracker: &MinRefMcStateTracker, - ) -> Result> { - let mut builder = CellBuilder::new(); - let mut cell_context = Cell::empty_context(); - shard_state.store_into(&mut builder, &mut cell_context)?; - let root = builder.build_ext(&mut cell_context)?; - - Ok(Arc::new(ShardStateStuff::from_state_and_root( - block_id, - shard_state, - root, - tracker, - )?)) - } -} diff --git a/collator/src/manager/utils.rs b/collator/src/manager/utils.rs index 3cacd2a5d..4c285c389 100644 --- a/collator/src/manager/utils.rs +++ b/collator/src/manager/utils.rs @@ -1,6 +1,8 @@ use anyhow::Result; use everscale_crypto::ed25519::PublicKey; +use everscale_types::boc::BocRepr; use everscale_types::models::ValidatorDescription; +use tycho_block_util::block::{BlockStuff, BlockStuffAug}; use crate::types::{BlockStuffForSync, CollationConfig}; @@ -9,13 +11,18 @@ use super::types::BlockCandidateEntry; pub fn build_block_stuff_for_sync( block_candidate: &BlockCandidateEntry, ) -> Result { - //TODO: make real implementation - //STUB: just build dummy block for sync + let block_data = block_candidate.candidate.data().to_vec(); + let block = BocRepr::decode(&block_data)?; + let block_stuff = BlockStuff::with_block(*block_candidate.candidate.block_id(), block); + + let block_stuff_aug = BlockStuffAug::new(block_stuff, block_data); + let res = BlockStuffForSync { block_id: *block_candidate.candidate.block_id(), - 
block_stuff: None, + block_stuff_aug, signatures: block_candidate.signatures.clone(), prev_blocks_ids: block_candidate.candidate.prev_blocks_ids().into(), + top_shard_blocks_ids: block_candidate.candidate.top_shard_blocks_ids().into(), }; Ok(res) diff --git a/collator/src/mempool/mempool_adapter.rs b/collator/src/mempool/mempool_adapter.rs index c218b12de..62c7a7a32 100644 --- a/collator/src/mempool/mempool_adapter.rs +++ b/collator/src/mempool/mempool_adapter.rs @@ -45,10 +45,7 @@ pub(crate) trait MempoolAdapter: Send + Sync + 'static { fn create(listener: Arc) -> Self; /// Schedule task to process new master block state (may perform gc or nodes rotation) - async fn enqueue_process_new_mc_block_state( - &self, - mc_state: Arc, - ) -> Result<()>; + async fn enqueue_process_new_mc_block_state(&self, mc_state: ShardStateStuff) -> Result<()>; /// Request, await, and return anchor from connected mempool by id. /// Return None if the requested anchor does not exist. @@ -125,10 +122,7 @@ impl MempoolAdapter for MempoolAdapterStdImpl { } } - async fn enqueue_process_new_mc_block_state( - &self, - mc_state: Arc, - ) -> Result<()> { + async fn enqueue_process_new_mc_block_state(&self, mc_state: ShardStateStuff) -> Result<()> { //TODO: make real implementation, currently does nothing tracing::info!( target: tracing_targets::MEMPOOL_ADAPTER, @@ -237,11 +231,8 @@ impl MempoolAdapter for MempoolAdapterStdImpl { fn _stub_create_random_anchor_with_stub_externals( anchor_id: MempoolAnchorId, ) -> Arc { - let chain_time = std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap() - .as_millis() as u64; - let externals_count: i32 = rand::thread_rng().gen_range(-10..10).max(0); + let chain_time = anchor_id as u64 * 471 * 6 % 1000000000; + let externals_count = chain_time as i32 % 10; let mut externals = vec![]; for i in 0..externals_count { let rand_addr = (0..32).map(|_| rand::random::()).collect::>(); diff --git a/collator/src/msg_queue.rs b/collator/src/msg_queue.rs index 47fd55243..f67658b48 100644 --- a/collator/src/msg_queue.rs +++ b/collator/src/msg_queue.rs @@ -14,7 +14,7 @@ use tycho_core::internal_queue::{ types::QueueDiff, }; -pub(crate) use tycho_core::internal_queue::iterator::{IterItem, QueueIterator}; +pub(crate) use tycho_core::internal_queue::iterator::QueueIterator; use crate::{tracing_targets, utils::shard::SplitMergeAction}; diff --git a/collator/src/state_node.rs b/collator/src/state_node.rs index d991d34e5..5b5e39879 100644 --- a/collator/src/state_node.rs +++ b/collator/src/state_node.rs @@ -1,176 +1,269 @@ +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use async_trait::async_trait; -use everscale_types::boc::Boc; -use everscale_types::cell::HashBytes; -use everscale_types::models::{BlockId, ShardIdent, ShardStateUnsplit}; +use everscale_types::models::{BlockId, ShardIdent}; +use futures_util::future::BoxFuture; +use tokio::sync::{broadcast, Mutex}; -use tycho_block_util::state::MinRefMcStateTracker; use tycho_block_util::{block::BlockStuff, state::ShardStateStuff}; -use tycho_storage::BlockHandle; +use tycho_core::block_strider::{ + BlockProvider, OptionalBlockStuff, StateSubscriber, StateSubscriberContext, +}; +use tycho_storage::{BlockHandle, Storage}; use crate::tracing_targets; use crate::types::BlockStuffForSync; -use crate::utils::task_descr::TaskResponseReceiver; - -// BUILDER #[allow(private_bounds, private_interfaces)] pub trait StateNodeAdapterBuilder 
where T: StateNodeAdapter, { - fn new() -> Self; + fn new(storage: Storage) -> Self; fn build(self, listener: Arc) -> T; } -pub struct StateNodeAdapterBuilderStdImpl; +pub struct StateNodeAdapterBuilderStdImpl { + pub storage: Storage, +} impl StateNodeAdapterBuilder for StateNodeAdapterBuilderStdImpl { - fn new() -> Self { - Self {} + fn new(storage: Storage) -> Self { + Self { storage } } #[allow(private_interfaces)] fn build(self, listener: Arc) -> StateNodeAdapterStdImpl { - StateNodeAdapterStdImpl::create(listener) + StateNodeAdapterStdImpl::create(listener, self.storage) } } -// EVENTS LISTENER - #[async_trait] -pub(crate) trait StateNodeEventListener: Send + Sync { - /// When new masterchain block received from blockchain - async fn on_mc_block(&self, mc_block_id: BlockId) -> Result<()>; +pub trait StateNodeEventListener: Send + Sync { + /// When our collated block was accepted and applied in state node + async fn on_block_accepted(&self, block_id: &BlockId) -> Result<()>; + /// When new applied block was received from blockchain + async fn on_block_accepted_external(&self, state: &ShardStateStuff) -> Result<()>; } -// ADAPTER - #[async_trait] -pub(crate) trait StateNodeAdapter: Send + Sync + 'static { - async fn get_last_applied_mc_block_id(&self) -> Result; - async fn request_state( - &self, - block_id: BlockId, - ) -> Result>>; - async fn get_block(&self, block_id: BlockId) -> Result>>; - async fn request_block( - &self, - block_id: BlockId, - ) -> Result>>>; - async fn accept_block(&self, block: BlockStuffForSync) -> Result>; +pub trait StateNodeAdapter: BlockProvider + Send + Sync + 'static { + /// Return id of last master block that was applied to node local state + async fn load_last_applied_mc_block_id(&self) -> Result; + /// Return master or shard state for the specified block from node local state + async fn load_state(&self, block_id: &BlockId) -> Result; + /// Return block by its id from node local state + async fn load_block(&self, block_id: &BlockId) -> Result>; + /// Return block handle by its id from node local state + async fn load_block_handle(&self, block_id: &BlockId) -> Result>; + /// Accept block: + /// 1. (TODO) Broadcast block to blockchain network + /// 2. 
Provide block to the block strider + async fn accept_block(&self, block: BlockStuffForSync) -> Result<()>; } pub struct StateNodeAdapterStdImpl { - _listener: Arc, + listener: Arc, + blocks: Arc>>>, + storage: Storage, + broadcaster: broadcast::Sender, } -impl StateNodeAdapterStdImpl { - fn create(listener: Arc) -> Self { - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Creating state node adapter..."); +impl BlockProvider for StateNodeAdapterStdImpl { + type GetNextBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; + type GetBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "State node adapter created"); + fn get_next_block<'a>(&'a self, prev_block_id: &'a BlockId) -> Self::GetNextBlockFut<'a> { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Get next block: {:?}", prev_block_id); + self.wait_for_block(prev_block_id) + } + fn get_block<'a>(&'a self, block_id: &'a BlockId) -> Self::GetBlockFut<'a> { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Get block: {:?}", block_id); + self.wait_for_block(block_id) + } +} + +impl StateNodeAdapterStdImpl { + pub fn create(listener: Arc, storage: Storage) -> Self { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "State node adapter created"); + let (broadcaster, _) = broadcast::channel(10000); Self { - _listener: listener, + listener, + storage, + blocks: Default::default(), + broadcaster, } } -} -#[async_trait] -impl StateNodeAdapter for StateNodeAdapterStdImpl { - async fn get_last_applied_mc_block_id(&self) -> Result { - //TODO: make real implementation - - //STUB: return block 1 - let stub_mc_block_id = BlockId { - shard: ShardIdent::new_full(-1), - seqno: 1, - root_hash: HashBytes::ZERO, - file_hash: HashBytes::ZERO, - }; - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "STUB: returns stub last applied mc block ({})", stub_mc_block_id.as_short_id()); - Ok(stub_mc_block_id) + fn wait_for_block<'a>( + &'a self, + block_id: &'a BlockId, + ) -> ::GetBlockFut<'a> { + let mut receiver = self.broadcaster.subscribe(); + Box::pin(async move { + loop { + let blocks = self.blocks.lock().await; + if let Some(shard_blocks) = blocks.get(&block_id.shard) { + if let Some(block) = shard_blocks.get(&block_id.seqno) { + return Some(Ok(block.block_stuff_aug.clone())); + } + } + drop(blocks); + + loop { + match receiver.recv().await { + Ok(received_block_id) if received_block_id == *block_id => { + break; + } + Ok(_) => continue, + Err(broadcast::error::RecvError::Lagged(count)) => { + tracing::warn!(target: tracing_targets::STATE_NODE_ADAPTER, "Broadcast channel lagged: {}", count); + continue; + } + Err(broadcast::error::RecvError::Closed) => { + tracing::error!(target: tracing_targets::STATE_NODE_ADAPTER, "Broadcast channel closed"); + return None; + } + } + } + } + }) } +} - async fn request_state( - &self, - block_id: BlockId, - ) -> Result>> { - //TODO: make real implementation - let (sender, receiver) = tokio::sync::oneshot::channel::>>(); - - //STUB: emulating async task - tokio::spawn(async move { - tokio::time::sleep(tokio::time::Duration::from_millis(45)).await; - - let cell = if block_id.is_masterchain() { - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "STUB: returns stub master state on block 2"); - const BOC: &[u8] = include_bytes!("state_node/tests/data/test_state_2_master.boc"); - Boc::decode(BOC) +impl StateSubscriber for StateNodeAdapterStdImpl { + type HandleStateFut<'a> = BoxFuture<'a, Result<()>>; + + 
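The `wait_for_block` helper above subscribes to the broadcast channel before inspecting the cache, so a block inserted between the check and the wait still wakes the waiter. A minimal self-contained model of that pattern (tokio with the `rt` and `macros` features; the cache and key types here are illustrative, not the adapter's real ones):

use std::{collections::HashMap, sync::Arc};
use tokio::sync::{broadcast, Mutex};

async fn wait_for(
    key: u32,
    cache: Arc<Mutex<HashMap<u32, String>>>,
    notify: broadcast::Sender<u32>,
) -> Option<String> {
    let mut rx = notify.subscribe(); // subscribe first: no lost wakeups
    loop {
        if let Some(v) = cache.lock().await.get(&key) {
            return Some(v.clone());
        }
        loop {
            match rx.recv().await {
                Ok(k) if k == key => break, // our key arrived: re-check the cache
                Ok(_) => continue,
                Err(broadcast::error::RecvError::Lagged(_)) => break, // missed some: re-check
                Err(broadcast::error::RecvError::Closed) => return None,
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let cache = Arc::new(Mutex::new(HashMap::new()));
    let (tx, _keep_alive) = broadcast::channel(16);
    let waiter = tokio::spawn(wait_for(1, cache.clone(), tx.clone()));
    cache.lock().await.insert(1, "block #1".to_string());
    let _ = tx.send(1); // wake the waiter
    assert_eq!(waiter.await.unwrap().as_deref(), Some("block #1"));
}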
fn handle_state<'a>(&'a self, cx: &'a StateSubscriberContext) -> Self::HandleStateFut<'a> { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Handle block: {:?}", cx.block.id()); + let block_id = *cx.block.id(); + let shard = block_id.shard; + let seqno = block_id.seqno; + + let blocks_lock = self.blocks.clone(); + let listener = self.listener.clone(); + + Box::pin(async move { + let mut blocks_guard = blocks_lock.lock().await; + let mut to_split = Vec::new(); + let mut to_remove = Vec::new(); + + let result_future = if let Some(shard_blocks) = blocks_guard.get(&shard) { + if let Some(block_data) = shard_blocks.get(&seqno) { + if shard.is_masterchain() { + let prev_seqno = block_data + .prev_blocks_ids + .last() + .ok_or(anyhow!("no prev block"))? + .seqno; + for id in &block_data.top_shard_blocks_ids { + to_split.push((id.shard, id.seqno)); + to_remove.push((id.shard, id.seqno)); + } + to_split.push((shard, prev_seqno)); + to_remove.push((shard, prev_seqno)); + } else { + to_remove.push((shard, seqno)); + } + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block accepted: {:?}", block_id); + listener.on_block_accepted(&block_id) + } else { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block accepted external: {:?}", block_id); + listener.on_block_accepted_external(&cx.state) + } } else { - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "STUB: returns stub shard state on block 2"); - const BOC: &[u8] = include_bytes!("state_node/tests/data/test_state_2_0:80.boc"); - Boc::decode(BOC) + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block accepted external: {:?}", block_id); + listener.on_block_accepted_external(&cx.state) }; - let cell = cell?; - let shard_state = cell.parse::()?; - tracing::debug!(target: tracing_targets::STATE_NODE_ADAPTER, "state: {:?}", shard_state); + for (shard, seqno) in &to_split { + if let Some(shard_blocks) = blocks_guard.get_mut(shard) { + shard_blocks.split_off(seqno); + } + } - let fixed_stub_block_id = BlockId { - shard: shard_state.shard_ident, - seqno: shard_state.seqno, - root_hash: block_id.root_hash, - file_hash: block_id.file_hash, - }; - let tracker = MinRefMcStateTracker::new(); - let state_stuff = - ShardStateStuff::new(fixed_stub_block_id, cell, &tracker).map(Arc::new); + for (shard, seqno) in &to_remove { + if let Some(shard_blocks) = blocks_guard.get_mut(shard) { + shard_blocks.remove(seqno); + } + } - sender - .send(state_stuff) - .map_err(|_err| anyhow!("eror sending result out of spawned future")) - }); + drop(blocks_guard); - let receiver = TaskResponseReceiver::create(receiver); + result_future.await?; - Ok(receiver) + Ok(()) + }) } +} - async fn get_block(&self, _block_id: BlockId) -> Result>> { - //TODO: make real implementation +#[async_trait] +impl StateNodeAdapter for StateNodeAdapterStdImpl { + async fn load_last_applied_mc_block_id(&self) -> Result { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Load last applied mc block id"); + self.storage + .node_state() + .load_last_mc_block_id() + .context("no blocks applied yet") + } - //STUB: just remove empty block - Ok(None) + async fn load_state(&self, block_id: &BlockId) -> Result { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Load state: {:?}", block_id); + let state = self + .storage + .shard_state_storage() + .load_state(block_id) + .await?; + Ok(state) } - async fn request_block( - &self, - _block_id: BlockId, - ) -> Result>>> { - //TODO: make real implementation - let (sender, 
receiver) = tokio::sync::oneshot::channel::>>>(); + async fn load_block(&self, block_id: &BlockId) -> Result> { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Load block: {:?}", block_id); + + let handle_storage = self.storage.block_handle_storage(); + let block_storage = self.storage.block_storage(); - //STUB: emulating async task - tokio::spawn(async move { - tokio::time::sleep(tokio::time::Duration::from_millis(85)).await; - sender.send(Ok(None)) - }); + let Some(handle) = handle_storage.load_handle(block_id) else { + return Ok(None); + }; + block_storage.load_block_data(&handle).await.map(Some) + } - Ok(TaskResponseReceiver::create(receiver)) + async fn load_block_handle(&self, block_id: &BlockId) -> Result> { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Load block handle: {:?}", block_id); + Ok(self.storage.block_handle_storage().load_handle(block_id)) } - async fn accept_block(&self, block: BlockStuffForSync) -> Result> { - //TODO: make real implementation - //STUB: create dummy blcok handle - let handle = BlockHandle::new( - &block.block_id, - Default::default(), - Arc::new(Default::default()), - ); - Ok(Arc::new(handle)) + async fn accept_block(&self, block: BlockStuffForSync) -> Result<()> { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block accepted: {:?}", block.block_id); + let mut blocks = self.blocks.lock().await; + let block_id = match block.block_id.shard.is_masterchain() { + true => { + let prev_block_id = *block + .prev_blocks_ids + .last() + .ok_or(anyhow!("no prev block"))?; + + blocks + .entry(block.block_id.shard) + .or_insert_with(BTreeMap::new) + .insert(prev_block_id.seqno, block); + prev_block_id + } + false => { + let block_id = block.block_id; + blocks + .entry(block.block_id.shard) + .or_insert_with(BTreeMap::new) + .insert(block.block_id.seqno, block); + block_id + } + }; + let broadcast_result = self.broadcaster.send(block_id).ok(); + tracing::trace!(target: tracing_targets::STATE_NODE_ADAPTER, "Block broadcast_result: {:?}", broadcast_result); + Ok(()) } } diff --git a/collator/src/state_node/tests/data/test_state_2_0:80.boc b/collator/src/state_node/tests/data/test_state_2_0:80.boc index 177ab0e77..72d9bc429 100644 Binary files a/collator/src/state_node/tests/data/test_state_2_0:80.boc and b/collator/src/state_node/tests/data/test_state_2_0:80.boc differ diff --git a/collator/src/state_node/tests/data/test_state_2_master.boc b/collator/src/state_node/tests/data/test_state_2_master.boc index d2baea36c..d01c94996 100644 Binary files a/collator/src/state_node/tests/data/test_state_2_master.boc and b/collator/src/state_node/tests/data/test_state_2_master.boc differ diff --git a/collator/src/test_utils.rs b/collator/src/test_utils.rs index fd567f7b4..38b3b58b2 100644 --- a/collator/src/test_utils.rs +++ b/collator/src/test_utils.rs @@ -2,8 +2,17 @@ use std::net::Ipv4Addr; use std::time::Duration; use everscale_crypto::ed25519; +use everscale_types::boc::Boc; +use everscale_types::cell::HashBytes; +use everscale_types::models::{BlockId, ShardStateUnsplit}; +use futures_util::future::BoxFuture; +use futures_util::FutureExt; +use sha2::Digest; +use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; +use tycho_core::block_strider::{BlockProvider, OptionalBlockStuff}; use tycho_network::{DhtConfig, DhtService, Network, OverlayService, PeerId, Router}; +use tycho_storage::{BlockMetaData, Db, DbOptions, Storage}; use crate::types::NodeNetwork; @@ -58,3 +67,97 @@ pub fn create_node_network() -> 
NodeNetwork { dht_client, } } + +pub async fn prepare_test_storage() -> anyhow::Result<(DummyArchiveProvider, Storage)> { + let provider = DummyArchiveProvider; + let temp = tempfile::tempdir().unwrap(); + let db = Db::open(temp.path().to_path_buf(), DbOptions::default()).unwrap(); + let storage = Storage::new(db, temp.path().join("file"), 1_000_000).unwrap(); + let tracker = MinRefMcStateTracker::default(); + + // master state + let master_bytes = include_bytes!("../src/state_node/tests/data/test_state_2_master.boc"); + let master_file_hash: HashBytes = sha2::Sha256::digest(master_bytes).into(); + let master_root = Boc::decode(master_bytes)?; + let master_root_hash = *master_root.repr_hash(); + let master_state = master_root.parse::()?; + + let mc_state_extra = master_state.load_custom()?; + let mc_state_extra = mc_state_extra.unwrap(); + let mut shard_info_opt = None; + for shard_info in mc_state_extra.shards.iter() { + shard_info_opt = Some(shard_info?); + break; + } + let shard_info = shard_info_opt.unwrap(); + + let master_id = BlockId { + shard: master_state.shard_ident, + seqno: master_state.seqno, + root_hash: master_root_hash, + file_hash: master_file_hash, + }; + let master_state_stuff = + ShardStateStuff::from_state_and_root(master_id, master_state, master_root, &tracker)?; + + let (handle, _) = storage.block_handle_storage().create_or_load_handle( + &master_id, + BlockMetaData { + is_key_block: mc_state_extra.after_key_block, + gen_utime: master_state_stuff.state().gen_utime, + mc_ref_seqno: 0, + }, + ); + + storage + .shard_state_storage() + .store_state(&handle, &master_state_stuff) + .await?; + + // shard state + let shard_bytes = include_bytes!("../src/state_node/tests/data/test_state_2_0:80.boc"); + + let shard_root = Boc::decode(shard_bytes)?; + + let shard_state = shard_root.parse::()?; + let shard_id = BlockId { + shard: shard_info.0, + seqno: shard_info.1.seqno, + root_hash: shard_info.1.root_hash, + file_hash: shard_info.1.file_hash, + }; + let shard_state_stuff = + ShardStateStuff::from_state_and_root(shard_id, shard_state, shard_root, &tracker)?; + + let (handle, _) = storage.block_handle_storage().create_or_load_handle( + &shard_id, + BlockMetaData { + is_key_block: false, + gen_utime: shard_state_stuff.state().gen_utime, + mc_ref_seqno: 0, + }, + ); + + storage + .shard_state_storage() + .store_state(&handle, &shard_state_stuff) + .await?; + + storage.node_state().store_last_mc_block_id(&master_id); + + Ok((provider, storage)) +} + +pub struct DummyArchiveProvider; +impl BlockProvider for DummyArchiveProvider { + type GetNextBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; + type GetBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; + + fn get_next_block<'a>(&'a self, prev_block_id: &'a BlockId) -> Self::GetNextBlockFut<'a> { + futures_util::future::ready(None).boxed() + } + + fn get_block<'a>(&'a self, block_id: &'a BlockId) -> Self::GetBlockFut<'a> { + futures_util::future::ready(None).boxed() + } +} diff --git a/collator/src/types.rs b/collator/src/types.rs index 1b5e4a1d5..e566008a4 100644 --- a/collator/src/types.rs +++ b/collator/src/types.rs @@ -1,28 +1,41 @@ -use everscale_crypto::ed25519::KeyPair; -use everscale_types::cell::HashBytes; -use everscale_types::models::{BlockId, OwnedMessage, ShardIdent, ShardStateUnsplit, Signature}; -use std::collections::HashMap; +use std::sync::Arc; -use tycho_block_util::block::ValidatorSubsetInfo; -use tycho_network::{DhtClient, OverlayService, PeerResolver}; +use anyhow::Result; -use std::sync::Arc; +use 
everscale_crypto::ed25519::KeyPair; +use everscale_types::cell::{CellBuilder, HashBytes}; +use everscale_types::models::{ + Block, BlockId, OwnedMessage, ShardIdent, ShardStateUnsplit, Signature, +}; -use tycho_block_util::block::BlockStuff; +use tycho_block_util::block::{BlockStuffAug, ValidatorSubsetInfo}; +use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; +use tycho_network::{DhtClient, OverlayService, PeerResolver}; +use tycho_util::FastHashMap; pub struct CollationConfig { pub key_pair: KeyPair, pub mc_block_min_interval_ms: u64, + pub max_mc_block_delta_from_bc_to_await_own: i32, + + pub supported_block_version: u32, + pub supported_capabilities: u64, + + pub max_collate_threads: u16, + + #[cfg(feature = "test")] + pub test_validators_keypairs: Vec<KeyPair>, } pub(crate) struct BlockCollationResult { pub candidate: BlockCandidate, - pub new_state: ShardStateUnsplit, + pub new_state_stuff: ShardStateStuff, } #[derive(Clone)] pub(crate) struct BlockCandidate { block_id: BlockId, + block: Block, prev_blocks_ids: Vec<BlockId>, top_shard_blocks_ids: Vec<BlockId>, data: Vec<u8>, @@ -33,6 +46,7 @@ pub(crate) struct BlockCandidate { impl BlockCandidate { pub fn new( block_id: BlockId, + block: Block, prev_blocks_ids: Vec<BlockId>, top_shard_blocks_ids: Vec<BlockId>, data: Vec<u8>, @@ -42,6 +56,7 @@ impl BlockCandidate { ) -> Self { Self { block_id, + block, prev_blocks_ids, top_shard_blocks_ids, data, @@ -53,6 +68,9 @@ impl BlockCandidate { pub fn block_id(&self) -> &BlockId { &self.block_id } + pub fn block(&self) -> &Block { + &self.block + } pub fn shard_id(&self) -> &ShardIdent { &self.block_id.shard } @@ -65,9 +83,32 @@ impl BlockCandidate { pub fn top_shard_blocks_ids(&self) -> &[BlockId] { &self.top_shard_blocks_ids } + pub fn data(&self) -> &[u8] { + &self.data + } +} + +pub(crate) trait ShardStateStuffExt { + fn from_state( + block_id: BlockId, + shard_state: ShardStateUnsplit, + tracker: &MinRefMcStateTracker, + ) -> Result<Self> + where + Self: Sized; +} +impl ShardStateStuffExt for ShardStateStuff { + fn from_state( + block_id: BlockId, + shard_state: ShardStateUnsplit, + tracker: &MinRefMcStateTracker, + ) -> Result<Self> { + let root = CellBuilder::build_from(&shard_state)?; + ShardStateStuff::from_state_and_root(block_id, shard_state, root, tracker) + } } -pub(crate) enum OnValidatedBlockEvent { +pub enum OnValidatedBlockEvent { ValidByState, Invalid, Valid(BlockSignatures), @@ -83,8 +124,8 @@ impl OnValidatedBlockEvent { } #[derive(Default, Clone)] -pub(crate) struct BlockSignatures { - pub signatures: HashMap<HashBytes, Signature>, +pub struct BlockSignatures { + pub signatures: FastHashMap<HashBytes, Signature>, } pub struct ValidatedBlock { @@ -118,20 +159,21 @@ impl ValidatedBlock { } } -pub(crate) struct BlockStuffForSync { +pub struct BlockStuffForSync { //STUB: will not parse Block because candidate does not contain real block //TODO: remove `block_id` and make `block_stuff: BlockStuff` when collator will generate real blocks pub block_id: BlockId, - pub block_stuff: Option<BlockStuff>, - pub signatures: HashMap<HashBytes, Signature>, + pub block_stuff_aug: BlockStuffAug, + pub signatures: FastHashMap<HashBytes, Signature>, pub prev_blocks_ids: Vec<BlockId>, + pub top_shard_blocks_ids: Vec<BlockId>, } /// (`ShardIdent`, seqno) pub(crate) type CollationSessionId = (ShardIdent, u32); #[derive(Clone)] -pub(crate) struct CollationSessionInfo { +pub struct CollationSessionInfo { /// Sequence number of the collation session seqno: u32, collators: ValidatorSubsetInfo, diff --git a/collator/src/utils/async_queued_dispatcher.rs b/collator/src/utils/async_queued_dispatcher.rs index 81e93c606..588ede355 100644 --- 
a/collator/src/utils/async_queued_dispatcher.rs +++ b/collator/src/utils/async_queued_dispatcher.rs @@ -1,6 +1,7 @@ -use std::{future::Future, pin::Pin}; +use std::{future::Future, pin::Pin, usize}; use anyhow::{anyhow, Result}; +use log::trace; use tokio::sync::{mpsc, oneshot}; use crate::tracing_targets; @@ -33,6 +34,11 @@ where pub fn run(mut worker: W, mut receiver: mpsc::Receiver>) { tokio::spawn(async move { while let Some(task) = receiver.recv().await { + trace!( + target: tracing_targets::ASYNC_QUEUE_DISPATCHER, + "Task #{} ({}): received", + task.id(), + task.get_descr()); let (task_id, task_descr) = (task.id(), task.get_descr()); let (func, responder) = task.extract(); tracing::trace!( diff --git a/collator/src/validator/mod.rs b/collator/src/validator/mod.rs index 7b1d7a220..b8cf1a17d 100644 --- a/collator/src/validator/mod.rs +++ b/collator/src/validator/mod.rs @@ -5,5 +5,5 @@ pub mod state; pub mod test_impl; pub mod types; #[allow(clippy::module_inception)] -mod validator; +pub mod validator; pub mod validator_processor; diff --git a/collator/src/validator/network/dto.rs b/collator/src/validator/network/dto.rs index f8d0bcbcb..71b192901 100644 --- a/collator/src/validator/network/dto.rs +++ b/collator/src/validator/network/dto.rs @@ -1,8 +1,7 @@ -use std::collections::HashMap; - use everscale_types::cell::HashBytes; use everscale_types::models::{BlockIdShort, Signature}; use tl_proto::{TlRead, TlWrite}; +use tycho_util::FastHashMap; #[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = 0x11112222)] @@ -17,7 +16,7 @@ impl SignaturesQuery { pub(crate) fn create( session_seqno: u32, block_header: BlockIdShort, - current_signatures: &HashMap<HashBytes, Signature>, + current_signatures: &FastHashMap<HashBytes, Signature>, ) -> Self { let signatures = current_signatures.iter().map(|(k, v)| (k.0, v.0)).collect(); Self { diff --git a/collator/src/validator/network/handlers.rs b/collator/src/validator/network/handlers.rs index 80d6e0322..996bde8b7 100644 --- a/collator/src/validator/network/handlers.rs +++ b/collator/src/validator/network/handlers.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use anyhow::anyhow; use everscale_types::models::BlockIdShort; -use tracing::debug; use tycho_network::Response; diff --git a/collator/src/validator/state.rs b/collator/src/validator/state.rs index 8151be303..f9df9b273 100644 --- a/collator/src/validator/state.rs +++ b/collator/src/validator/state.rs @@ -5,15 +5,15 @@ use anyhow::{bail, Context}; use everscale_types::cell::HashBytes; use everscale_types::models::{BlockId, BlockIdShort, Signature}; -use tycho_network::PrivateOverlay; - use crate::validator::types::{ BlockValidationCandidate, ValidationResult, ValidationSessionInfo, ValidatorInfo, }; +use tycho_network::PrivateOverlay; +use tycho_util::FastHashMap; struct SignatureMaps { - valid_signatures: HashMap<HashBytes, Signature>, - invalid_signatures: HashMap<HashBytes, Signature>, + valid_signatures: FastHashMap<HashBytes, Signature>, + invalid_signatures: FastHashMap<HashBytes, Signature>, } /// Represents the state of validation for blocks and sessions. @@ -32,11 +32,11 @@ pub trait ValidationState: Send + Sync + 'static { } /// Holds information about a validation session. -pub(crate) struct SessionInfo { +pub struct SessionInfo { session_id: u32, max_weight: u64, - blocks_signatures: HashMap<BlockIdShort, (BlockId, SignatureMaps)>, - cached_signatures: HashMap<BlockIdShort, HashMap<HashBytes, Signature>>, + blocks_signatures: FastHashMap<BlockIdShort, (BlockId, SignatureMaps)>, + cached_signatures: FastHashMap<BlockIdShort, FastHashMap<HashBytes, Signature>>, validation_session_info: Arc<ValidationSessionInfo>, private_overlay: PrivateOverlay, } @@ -108,6 +108,7 @@ impl SessionInfo { /// Determines the validation status of a block. 
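Several structures here migrate from std `HashMap` to `tycho_util::FastHashMap`. Assuming, as is common in this codebase's utility crates, that `FastHashMap` is an alias for a std `HashMap` with a faster, non-DoS-resistant hasher such as `ahash`, the pattern looks roughly like this (the alias shape and the `ahash` dependency are assumptions, not confirmed by this diff):

```rust
use std::collections::HashMap;

// Assumed shape of tycho_util::FastHashMap: a std HashMap with an ahash
// hasher state. `default()` replaces `new()` because the hasher is part of
// the type, which is why the diff rewrites `HashMap::new()` call sites.
type FastHashMap<K, V> = HashMap<K, V, ahash::RandomState>;

fn main() {
    let mut signatures: FastHashMap<[u8; 32], [u8; 64]> = FastHashMap::default();
    signatures.insert([0u8; 32], [0u8; 64]);
    assert_eq!(signatures.len(), 1);
}
```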
pub fn validation_status(&self, block_id_short: &BlockIdShort) -> ValidationResult { + let valid_weight = self.max_weight * 2 / 3 + 1; if let Some((_, signature_maps)) = self.blocks_signatures.get(block_id_short) { let total_valid_weight: u64 = signature_maps .valid_signatures @@ -120,16 +121,15 @@ impl SessionInfo { }) .sum(); - let valid_weight = self.max_weight * 2 / 3 + 1; if total_valid_weight >= valid_weight { ValidationResult::Valid } else if self.is_invalid(signature_maps, valid_weight) { ValidationResult::Invalid } else { - ValidationResult::Insufficient + ValidationResult::Insufficient(total_valid_weight, valid_weight) } } else { - ValidationResult::Insufficient + ValidationResult::Insufficient(0, valid_weight) } } /// Lists validators without signatures for a given block. @@ -187,23 +187,11 @@ impl SessionInfo { pub fn get_valid_signatures( &self, block_id_short: &BlockIdShort, - ) -> HashMap<HashBytes, Signature> { + ) -> FastHashMap<HashBytes, Signature> { if let Some((_, signature_maps)) = self.blocks_signatures.get(block_id_short) { signature_maps.valid_signatures.clone() } else { - HashMap::new() - } - } - - /// Retrieves valid signatures for a block. - pub fn get_invalid_signatures( - &self, - block_id_short: &BlockIdShort, - ) -> HashMap<HashBytes, Signature> { - if let Some((_, signature_maps)) = self.blocks_signatures.get(block_id_short) { - signature_maps.invalid_signatures.clone() - } else { - HashMap::new() + FastHashMap::default() } } @@ -223,8 +211,8 @@ impl SessionInfo { ( *block_id, SignatureMaps { - valid_signatures: HashMap::new(), - invalid_signatures: HashMap::new(), + valid_signatures: FastHashMap::default(), + invalid_signatures: FastHashMap::default(), }, ) }); diff --git a/collator/src/validator/test_impl.rs b/collator/src/validator/test_impl.rs index f0f80cea7..74348d6be 100644 --- a/collator/src/validator/test_impl.rs +++ b/collator/src/validator/test_impl.rs @@ -7,14 +7,12 @@ use everscale_crypto::ed25519::{KeyPair, PublicKey}; use everscale_types::models::{BlockId, BlockIdShort, Signature}; use tycho_block_util::state::ShardStateStuff; +use tycho_util::FastHashMap; use crate::tracing_targets; use crate::types::{BlockSignatures, OnValidatedBlockEvent, ValidatorNetwork}; use crate::validator::types::ValidationSessionInfo; -use crate::{ - state_node::StateNodeAdapter, types::ValidatedBlock, - utils::async_queued_dispatcher::AsyncQueuedDispatcher, -}; +use crate::{state_node::StateNodeAdapter, utils::async_queued_dispatcher::AsyncQueuedDispatcher}; use super::{ validator_processor::{ValidatorProcessor, ValidatorTaskResult}, @@ -27,7 +25,7 @@ where { _dispatcher: Arc>, listener: Arc<dyn ValidatorEventListener>, - state_node_adapter: Arc<ST>, + _state_node_adapter: Arc<ST>, _stub_candidates_cache: HashMap, } @@ -61,13 +59,13 @@ where fn new( _dispatcher: Arc>, listener: Arc<dyn ValidatorEventListener>, - state_node_adapter: Arc<ST>, + _state_node_adapter: Arc<ST>, _network: ValidatorNetwork, ) -> Self { Self { _dispatcher, listener, - state_node_adapter, + _state_node_adapter, _stub_candidates_cache: HashMap::new(), } } @@ -78,7 +76,7 @@ where _session_seqno: u32, current_validator_keypair: KeyPair, ) -> Result<ValidatorTaskResult> { - let mut signatures = HashMap::new(); + let mut signatures = FastHashMap::default(); signatures.insert( current_validator_keypair.public_key.to_bytes().into(), Signature::default(), @@ -99,14 +97,13 @@ where } fn get_dispatcher(&self) -> Arc> { - todo!() + self._dispatcher.clone() } async fn try_add_session( &mut self, _session: Arc<ValidationSessionInfo>, ) -> Result<ValidatorTaskResult> { - //STUB: do nothing Ok(ValidatorTaskResult::Void) } diff --git a/collator/src/validator/types.rs b/collator/src/validator/types.rs index 
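`validation_status` now computes the acceptance threshold up front: a block is `Valid` once the collected signature weight strictly exceeds two thirds of `max_weight`, which in integer arithmetic is `max_weight * 2 / 3 + 1`; otherwise the new `Insufficient(total_valid_weight, valid_weight)` variant reports how far validation has progressed. A small check of the boundary values:

```rust
// Smallest integer weight strictly greater than 2/3 of the total, matching
// the expression used in validation_status above.
fn required_weight(max_weight: u64) -> u64 {
    max_weight * 2 / 3 + 1
}

fn main() {
    assert_eq!(required_weight(3), 3);   // 3 equal validators: all must sign
    assert_eq!(required_weight(4), 3);   // 4 validators: 3 suffice
    assert_eq!(required_weight(100), 67);
}
```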
e947158ba..75ad70ca9 100644 --- a/collator/src/validator/types.rs +++ b/collator/src/validator/types.rs @@ -2,8 +2,7 @@ use std::collections::HashMap; use std::convert::TryFrom; use std::sync::Arc; -use anyhow::bail; -use everscale_crypto::ed25519::{KeyPair, PublicKey}; +use everscale_crypto::ed25519::PublicKey; use everscale_types::cell::HashBytes; use everscale_types::models::{BlockId, ValidatorDescription}; use tl_proto::{TlRead, TlWrite}; @@ -13,15 +12,15 @@ use crate::types::CollationSessionInfo; pub(crate) type ValidatorsMap = HashMap<[u8; 32], Arc<ValidatorInfo>>; -pub(crate) enum ValidatorInfoError { +pub enum ValidatorInfoError { InvalidPublicKey, } #[derive(Clone)] -pub(crate) struct ValidatorInfo { +pub struct ValidatorInfo { pub public_key: PublicKey, pub weight: u64, - pub adnl_addr: Option<HashBytes>, + pub _adnl_addr: Option<HashBytes>, } impl TryFrom<&ValidatorDescription> for ValidatorInfo { @@ -33,28 +32,20 @@ impl TryFrom<&ValidatorDescription> for ValidatorInfo { Ok(Self { public_key: pubkey, weight: value.weight, - adnl_addr: value.adnl_addr.map(|addr| HashBytes(addr.0)), + _adnl_addr: value.adnl_addr.map(|addr| HashBytes(addr.0)), }) } } -pub(crate) struct ValidationSessionInfo { +pub struct ValidationSessionInfo { pub seqno: u32, pub validators: ValidatorsMap, - pub current_validator_keypair: KeyPair, } impl TryFrom<Arc<CollationSessionInfo>> for ValidationSessionInfo { type Error = anyhow::Error; fn try_from(session_info: Arc<CollationSessionInfo>) -> std::result::Result<Self, Self::Error> { - let current_validator_keypair = match session_info.current_collator_keypair() { - Some(keypair) => *keypair, - None => { - bail!("Collator keypair is not set, skip candidate validation"); - } - }; - let mut validators = HashMap::new(); for validator_descr in session_info.collators().validators.iter() { let validator_info: anyhow::Result<ValidatorInfo> = @@ -75,7 +66,6 @@ impl TryFrom<Arc<CollationSessionInfo>> for ValidationSessionInfo { let validation_session = ValidationSessionInfo { seqno: session_info.seqno(), validators, - current_validator_keypair, }; Ok(validation_session) } @@ -113,8 +103,8 @@ pub(crate) struct OverlayNumber { } #[derive(Eq, PartialEq, Debug)] -pub(crate) enum ValidationResult { +pub enum ValidationResult { Valid, Invalid, - Insufficient, + Insufficient(u64, u64), } diff --git a/collator/src/validator/validator.rs b/collator/src/validator/validator.rs index 515d9af7c..e14b45cd3 100644 --- a/collator/src/validator/validator.rs +++ b/collator/src/validator/validator.rs @@ -8,19 +8,15 @@ use everscale_types::models::BlockId; use crate::types::{OnValidatedBlockEvent, ValidatorNetwork}; use crate::validator::types::ValidationSessionInfo; use crate::{ - method_to_async_task_closure, - state_node::StateNodeAdapter, - tracing_targets, - utils::async_queued_dispatcher::{ - AsyncQueuedDispatcher, STANDARD_DISPATCHER_QUEUE_BUFFER_SIZE, - }, + method_to_async_task_closure, state_node::StateNodeAdapter, tracing_targets, + utils::async_queued_dispatcher::AsyncQueuedDispatcher, }; use super::validator_processor::{ValidatorProcessor, ValidatorTaskResult}; - +const VALIDATOR_BUFFER_SIZE: usize = 1usize; //TODO: remove emitter #[async_trait] -pub(crate) trait ValidatorEventEmitter { +pub trait ValidatorEventEmitter { /// When shard or master block was validated by validator async fn on_block_validated_event( &self, @@ -30,7 +26,7 @@ } #[async_trait] -pub(crate) trait ValidatorEventListener: Send + Sync { +pub trait ValidatorEventListener: Send + Sync { /// Process validated shard or master block async fn on_block_validated( &self, @@ -40,7 +36,7 @@ pub(crate) trait 
ValidatorEventListener: Send + Sync { } #[async_trait] -pub(crate) trait Validator<ST>: Send + Sync + 'static +pub trait Validator<ST>: Send + Sync + 'static where ST: StateNodeAdapter, { @@ -63,7 +59,7 @@ where } #[allow(private_bounds)] -pub(crate) struct ValidatorStdImpl<W, ST> +pub struct ValidatorStdImpl<W, ST> where W: ValidatorProcessor<ST>, ST: StateNodeAdapter, @@ -86,8 +82,7 @@ where tracing::info!(target: tracing_targets::VALIDATOR, "Creating validator..."); // create dispatcher for own async tasks queue - let (dispatcher, receiver) = - AsyncQueuedDispatcher::new(STANDARD_DISPATCHER_QUEUE_BUFFER_SIZE); + let (dispatcher, receiver) = AsyncQueuedDispatcher::new(VALIDATOR_BUFFER_SIZE); let dispatcher = Arc::new(dispatcher); // create validation processor and run dispatcher for own tasks queue @@ -136,350 +131,3 @@ where .await } } - -#[cfg(test)] -mod tests { - use std::collections::HashMap; - use std::net::Ipv4Addr; - - use std::time::Duration; - - use everscale_crypto::ed25519; - use everscale_crypto::ed25519::KeyPair; - use everscale_types::models::ValidatorDescription; - use rand::prelude::ThreadRng; - use tokio::sync::{Mutex, Notify}; - - use tracing::debug; - - use tycho_block_util::block::ValidatorSubsetInfo; - use tycho_network::{ - DhtClient, DhtConfig, DhtService, Network, OverlayService, PeerId, PeerResolver, Router, - }; - - use crate::state_node::{ - StateNodeAdapterBuilder, StateNodeAdapterBuilderStdImpl, StateNodeEventListener, - }; - use crate::test_utils::try_init_test_tracing; - use crate::types::{BlockSignatures, CollationSessionInfo}; - - use crate::validator::state::{ValidationState, ValidationStateStdImpl}; - use crate::validator::types::ValidationSessionInfo; - - use crate::validator::validator_processor::ValidatorProcessorStdImpl; - - use super::*; - - pub struct TestValidatorEventListener { - validated_blocks: Mutex<Vec<BlockId>>, - notify: Arc<Notify>, - expected_notifications: Mutex<u32>, - received_notifications: Mutex<u32>, - } - - impl TestValidatorEventListener { - pub fn new(expected_count: u32) -> Arc<Self> { - Arc::new(Self { - validated_blocks: Mutex::new(vec![]), - notify: Arc::new(Notify::new()), - expected_notifications: Mutex::new(expected_count), - received_notifications: Mutex::new(0), - }) - } - - pub async fn increment_and_check(&self) { - let mut received = self.received_notifications.lock().await; - *received += 1; - if *received == *self.expected_notifications.lock().await { - self.notify.notify_one(); - } - } - } - - #[async_trait] - impl ValidatorEventListener for TestValidatorEventListener { - async fn on_block_validated( - &self, - block_id: BlockId, - event: OnValidatedBlockEvent, - ) -> Result<()> { - let mut validated_blocks = self.validated_blocks.lock().await; - validated_blocks.push(block_id); - self.increment_and_check().await; - debug!("block validated event"); - Ok(()) - } - } - - #[async_trait] - impl StateNodeEventListener for TestValidatorEventListener { - async fn on_mc_block(&self, _mc_block_id: BlockId) -> Result<()> { - unimplemented!("Not implemented"); - } - } - - struct Node { - network: Network, - keypair: KeyPair, - overlay_service: OverlayService, - dht_client: DhtClient, - peer_resolver: PeerResolver, - } - - impl Node { - fn new(key: &ed25519::SecretKey) -> Self { - let keypair = ed25519::KeyPair::from(key); - let local_id = PeerId::from(keypair.public_key); - - let (dht_tasks, dht_service) = DhtService::builder(local_id) - .with_config(DhtConfig { - local_info_announce_period: Duration::from_secs(1), - local_info_announce_period_max_jitter: Duration::from_secs(1), - 
routing_table_refresh_period: Duration::from_secs(1), - routing_table_refresh_period_max_jitter: Duration::from_secs(1), - ..Default::default() - }) - .build(); - - let (overlay_tasks, overlay_service) = OverlayService::builder(local_id) - .with_dht_service(dht_service.clone()) - .build(); - - let router = Router::builder() - .route(overlay_service.clone()) - .route(dht_service.clone()) - .build(); - - let network = Network::builder() - .with_private_key(key.to_bytes()) - .with_service_name("test-service") - .build((Ipv4Addr::LOCALHOST, 0), router) - .unwrap(); - - let dht_client = dht_service.make_client(&network); - let peer_resolver = dht_service.make_peer_resolver().build(&network); - - overlay_tasks.spawn(&network); - dht_tasks.spawn(&network); - - Self { - network, - keypair, - overlay_service, - dht_client, - peer_resolver, - } - } - } - - fn make_network(node_count: usize) -> Vec<Node> { - let keys = (0..node_count) - .map(|_| ed25519::SecretKey::generate(&mut rand::thread_rng())) - .collect::<Vec<_>>(); - let nodes = keys.iter().map(Node::new).collect::<Vec<_>>(); - let common_peer_info = nodes.first().unwrap().network.sign_peer_info(0, u32::MAX); - for node in &nodes { - node.dht_client - .add_peer(Arc::new(common_peer_info.clone())) - .unwrap(); - } - nodes - } - - #[tokio::test] - async fn test_validator_accept_block_by_state() -> Result<()> { - let test_listener = TestValidatorEventListener::new(1); - let _state_node_event_listener: Arc<dyn StateNodeEventListener> = test_listener.clone(); - - let state_node_adapter = - Arc::new(StateNodeAdapterBuilderStdImpl::new().build(test_listener.clone())); - let _validation_state = ValidationStateStdImpl::new(); - - let random_secret_key = ed25519::SecretKey::generate(&mut rand::thread_rng()); - let keypair = ed25519::KeyPair::from(&random_secret_key); - let local_id = PeerId::from(keypair.public_key); - let (_, _overlay_service) = OverlayService::builder(local_id).build(); - - let (_overlay_tasks, overlay_service) = OverlayService::builder(local_id).build(); - - let router = Router::builder().route(overlay_service.clone()).build(); - let network = Network::builder() - .with_private_key(random_secret_key.to_bytes()) - .with_service_name("test-service") - .build((Ipv4Addr::LOCALHOST, 0), router) - .unwrap(); - - let (_, dht_service) = DhtService::builder(local_id) - .with_config(DhtConfig { - local_info_announce_period: Duration::from_secs(1), - local_info_announce_period_max_jitter: Duration::from_secs(1), - routing_table_refresh_period: Duration::from_secs(1), - routing_table_refresh_period_max_jitter: Duration::from_secs(1), - ..Default::default() - }) - .build(); - - let dht_client = dht_service.make_client(&network); - let peer_resolver = dht_service.make_peer_resolver().build(&network); - - let validator_network = ValidatorNetwork { - overlay_service, - peer_resolver, - dht_client, - }; - - let _validator = ValidatorStdImpl::<ValidatorProcessorStdImpl<_>, _>::create( - test_listener.clone(), - state_node_adapter, - validator_network, - ); - - let block = BlockId { - shard: Default::default(), - seqno: 0, - root_hash: Default::default(), - file_hash: Default::default(), - }; - - let validator_description = ValidatorDescription { - public_key: KeyPair::generate(&mut ThreadRng::default()) - .public_key - .to_bytes() - .into(), - weight: 0, - adnl_addr: None, - mc_seqno_since: 0, - prev_total_weight: 0, - }; - - let validators = ValidatorSubsetInfo { - validators: vec![validator_description], - short_hash: 0, - }; - let keypair = KeyPair::generate(&mut ThreadRng::default()); - let _collator_session_info = 
CollationSessionInfo::new(0, validators, Some(keypair)); - test_listener - .on_block_validated(block, OnValidatedBlockEvent::ValidByState) - .await?; - - let validated_blocks = test_listener.validated_blocks.lock().await; - assert!(!validated_blocks.is_empty(), "No blocks were validated."); - - Ok(()) - } - - #[tokio::test] - async fn test_validator_accept_block_by_network() -> Result<()> { - try_init_test_tracing(tracing_subscriber::filter::LevelFilter::DEBUG); - - let network_nodes = make_network(3); - let blocks_amount = 1; // Assuming you expect 3 validation per node. - - let expected_validations = network_nodes.len() as u32; // Expecting each node to validate - let _test_listener = TestValidatorEventListener::new(expected_validations); - - let mut validators = vec![]; - let mut listeners = vec![]; // Track listeners for later validation - - for node in network_nodes { - // Create a unique listener for each validator - let test_listener = TestValidatorEventListener::new(blocks_amount); - listeners.push(test_listener.clone()); - - let state_node_adapter = - Arc::new(StateNodeAdapterBuilderStdImpl::new().build(test_listener.clone())); - let _validation_state = ValidationStateStdImpl::new(); - let network = ValidatorNetwork { - overlay_service: node.overlay_service.clone(), - dht_client: node.dht_client.clone(), - peer_resolver: node.peer_resolver.clone(), - }; - let validator = ValidatorStdImpl::<ValidatorProcessorStdImpl<_>, _>::create( - test_listener.clone(), - state_node_adapter, - network, - ); - validators.push((validator, node)); - } - - let mut validators_descriptions = vec![]; - for (_validator, node) in &validators { - let peer_id = node.network.peer_id(); - let _keypair = node.keypair; - validators_descriptions.push(ValidatorDescription { - public_key: (*peer_id.as_bytes()).into(), - weight: 1, - adnl_addr: None, - mc_seqno_since: 0, - prev_total_weight: 0, - }); - } - - let blocks = create_blocks(blocks_amount); - - let validators_subset_info = ValidatorSubsetInfo { - validators: validators_descriptions, - short_hash: 0, - }; - for (validator, _node) in &validators { - let collator_session_info = Arc::new(CollationSessionInfo::new( - 1, - validators_subset_info.clone(), - Some(_node.keypair), // Ensure you use the node's keypair correctly here - )); - // Assuming this setup is correct and necessary for each validator - - let validation_session = - Arc::new(ValidationSessionInfo::try_from(collator_session_info.clone()).unwrap()); - validator - .enqueue_add_session(validation_session) - .await - .unwrap(); - } - - tokio::time::sleep(Duration::from_secs(1)).await; - - for (validator, _node) in &validators { - let collator_session_info = Arc::new(CollationSessionInfo::new( - 1, - validators_subset_info.clone(), - Some(_node.keypair), // Ensure you use the node's keypair correctly here - )); - - for block in blocks.iter() { - validator - .enqueue_candidate_validation( - *block, - collator_session_info.seqno(), - *collator_session_info.current_collator_keypair().unwrap(), - ) - .await - .unwrap(); - } - } - - for listener in listeners { - listener.notify.notified().await; - let validated_blocks = listener.validated_blocks.lock().await; - assert_eq!( - validated_blocks.len(), - blocks_amount as usize, - "Expected each validator to validate the block once." 
- ); - } - Ok(()) - } - - fn create_blocks(amount: u32) -> Vec<BlockId> { - let mut blocks = vec![]; - for i in 0..amount { - blocks.push(BlockId { - shard: Default::default(), - seqno: i, - root_hash: Default::default(), - file_hash: Default::default(), - }); - } - blocks - } -} diff --git a/collator/src/validator/validator_processor.rs b/collator/src/validator/validator_processor.rs index 654c91122..a4fb9fff2 100644 --- a/collator/src/validator/validator_processor.rs +++ b/collator/src/validator/validator_processor.rs @@ -1,4 +1,3 @@ -use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; @@ -8,13 +7,13 @@ use everscale_crypto::ed25519::KeyPair; use everscale_types::cell::HashBytes; use everscale_types::models::{BlockId, BlockIdShort, Signature}; use tokio::sync::broadcast; -use tokio::time::interval; use tracing::warn; -use tracing::{debug, error, trace}; +use tracing::{debug, trace}; -use crate::types::{BlockSignatures, OnValidatedBlockEvent, ValidatedBlock, ValidatorNetwork}; +use crate::types::{BlockSignatures, OnValidatedBlockEvent, ValidatorNetwork}; use tycho_block_util::state::ShardStateStuff; use tycho_network::{OverlayId, PeerId, PrivateOverlay, Request}; +use tycho_util::FastHashMap; use crate::validator::network::dto::SignaturesQuery; use crate::validator::network::network_service::NetworkService; @@ -29,13 +28,15 @@ use crate::{ use super::{ValidatorEventEmitter, ValidatorEventListener}; -const MAX_VALIDATION_ATTEMPTS: u32 = 1000; -const VALIDATION_RETRY_TIMEOUT_SEC: u64 = 3; +const NETWORK_TIMEOUT: Duration = Duration::from_millis(1000); +const INITIAL_BACKOFF: Duration = Duration::from_millis(100); +const MAX_BACKOFF: Duration = Duration::from_secs(10); +const BACKOFF_FACTOR: u32 = 2; // Factor by which the timeout will increase #[derive(PartialEq, Debug)] pub enum ValidatorTaskResult { Void, - Signatures(HashMap<HashBytes, Signature>), + Signatures(FastHashMap<HashBytes, Signature>), ValidationStatus(ValidationResult), } @@ -89,18 +90,18 @@ where async fn validate_candidate_by_block_from_bc( &mut self, - _candidate_id: BlockId, + candidate_id: BlockId, ) -> Result<ValidatorTaskResult> { - // self.on_block_validated_event(ValidatedBlock::new(candidate_id, vec![], true)) - // .await?; - // Ok(ValidatorTaskResult::Void) - todo!(); + self.on_block_validated_event(candidate_id, OnValidatedBlockEvent::ValidByState) + .await?; + Ok(ValidatorTaskResult::Void) } async fn get_block_signatures( &mut self, session_seqno: u32, block_id_short: &BlockIdShort, ) -> Result<ValidatorTaskResult>; + async fn validate_candidate( &mut self, candidate_id: BlockId, @@ -114,7 +115,7 @@ where ) -> Result<ValidatorTaskResult>; } -pub(crate) struct ValidatorProcessorStdImpl<ST> +pub struct ValidatorProcessorStdImpl<ST> where ST: StateNodeAdapter, { @@ -171,6 +172,7 @@ where &mut self, session: Arc<ValidationSessionInfo>, ) -> Result<ValidatorTaskResult> { + trace!(target: tracing_targets::VALIDATOR, "Trying to add session seqno {:?}", session.seqno); if self.validation_state.get_session(session.seqno).is_none() { let (peer_resolver, local_peer_id) = { let network = self.network.clone(); @@ -183,6 +185,7 @@ where let overlay_id = OverlayNumber { session_seqno: session.seqno, }; + trace!(target: tracing_targets::VALIDATOR, overlay_id = ?session.seqno, "Creating private overlay"); let overlay_id = OverlayId(tl_proto::hash(overlay_id)); let network_service = NetworkService::new(self.get_dispatcher().clone()); @@ -196,7 +199,7 @@ .add_private_overlay(&private_overlay); if !overlay_added { - bail!("Failed to add private overlay"); + panic!("Failed to add private overlay"); } self.validation_state @@ -209,8 +212,10 @@ where continue; } 
entries.insert(&PeerId(validator.public_key.to_bytes())); + trace!(target: tracing_targets::VALIDATOR, validator_pubkey = ?validator.public_key.as_bytes(), "Added validator to overlay"); } } + trace!(target: tracing_targets::VALIDATOR, "Session seqno {:?} added", session.seqno); Ok(ValidatorTaskResult::Void) } @@ -222,6 +227,7 @@ where current_validator_keypair: KeyPair, ) -> Result<ValidatorTaskResult> { let mut stop_receiver = self.stop_sender.subscribe(); + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Starting candidate validation"); // Simplify session retrieval with clear, concise error handling. let session = self .validation_state .get_mut_session(session_seqno) .ok_or_else(|| anyhow!("Failed to start candidate validation. Session not found"))?; + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Signing block"); + let our_signature = sign_block(&current_validator_keypair, &candidate_id)?; let current_validator_signature = HashBytes(current_validator_keypair.public_key.to_bytes()); + + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Adding block to session"); session.add_block(candidate_id)?; + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Adding our signature to session"); + let enqueue_task_result = self - .dispatcher - .enqueue_task(method_to_async_task_closure!( - process_candidate_signature_response, + .process_candidate_signature_response( session_seqno, candidate_id.as_short_id(), - vec![(current_validator_signature.0, our_signature.0)] - )) + vec![(current_validator_signature.0, our_signature.0)], + ) .await; + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Enqueued task for processing signatures response"); if let Err(e) = enqueue_task_result { + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Failed to enqueue task for processing signatures response {e:?}"); bail!("Failed to enqueue task for processing signatures response {e:?}"); } + let session = self + .validation_state + .get_session(session_seqno) + .ok_or_else(|| anyhow!("Failed to start candidate validation. 
Session not found"))?; + + let validation_status = session.validation_status(&candidate_id.as_short_id()); + + if validation_status == ValidationResult::Valid + || validation_status == ValidationResult::Invalid + { + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Validation status is already set for block {:?}", candidate_id); + return Ok(ValidatorTaskResult::Void); + } + let dispatcher = self.get_dispatcher().clone(); let current_validator_pubkey = current_validator_keypair.public_key; + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Starting validation loop"); tokio::spawn(async move { - let mut retry_interval = interval(Duration::from_secs(VALIDATION_RETRY_TIMEOUT_SEC)); - let max_retries = MAX_VALIDATION_ATTEMPTS; - let mut attempts = 0; + let mut iteration = 0; + loop { + let interval_duration = if iteration == 0 { + Duration::from_millis(0) + } else { + let exponential_backoff = INITIAL_BACKOFF * BACKOFF_FACTOR.pow(iteration - 1); + + if exponential_backoff > MAX_BACKOFF { + MAX_BACKOFF + } else { + exponential_backoff + } + }; + + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, interval = ?interval_duration, "Waiting for next validation attempt"); - while attempts < max_retries { - trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Attempt to validate block"); - attempts += 1; let dispatcher_clone = dispatcher.clone(); let cloned_candidate = candidate_id; @@ -269,7 +305,8 @@ where break; } }, - _ = retry_interval.tick() => { + _ = tokio::time::sleep(interval_duration) => { + let validation_task_result = dispatcher_clone.enqueue_task_with_responder( method_to_async_task_closure!( get_validation_status, @@ -277,6 +314,8 @@ where &cloned_candidate.as_short_id()) ).await; + trace!(target: tracing_targets::VALIDATOR, block = %cloned_candidate, "Enqueued task for getting validation status"); + match validation_task_result { Ok(receiver) => match receiver.await.unwrap() { Ok(ValidatorTaskResult::ValidationStatus(validation_status)) => { @@ -285,27 +324,26 @@ where break; } + trace!(target: tracing_targets::VALIDATOR, block = %cloned_candidate, "Validation status is not set yet. 
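The loop above replaces the old fixed 3-second retry interval (`MAX_VALIDATION_ATTEMPTS` / `VALIDATION_RETRY_TIMEOUT_SEC`) with exponential backoff: iteration 0 fires immediately, after which delays double from `INITIAL_BACKOFF` until they saturate at `MAX_BACKOFF`. A self-contained sketch of the same schedule, condensing the diff's if/else comparison into `min`:

```rust
use std::time::Duration;

// Delay before a given retry iteration, using the constants from this diff:
// INITIAL_BACKOFF = 100ms, BACKOFF_FACTOR = 2, MAX_BACKOFF = 10s.
fn delay(iteration: u32) -> Duration {
    const INITIAL_BACKOFF: Duration = Duration::from_millis(100);
    const MAX_BACKOFF: Duration = Duration::from_secs(10);
    const BACKOFF_FACTOR: u32 = 2;
    if iteration == 0 {
        Duration::from_millis(0) // first attempt runs immediately
    } else {
        MAX_BACKOFF.min(INITIAL_BACKOFF * BACKOFF_FACTOR.pow(iteration - 1))
    }
}

fn main() {
    assert_eq!(delay(1), Duration::from_millis(100));
    assert_eq!(delay(4), Duration::from_millis(800));
    assert_eq!(delay(10), Duration::from_secs(10)); // 100ms * 2^9 = 51.2s, capped
}
```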
Enqueueing validation task"); dispatcher_clone.enqueue_task(method_to_async_task_closure!( validate_candidate, cloned_candidate, session_seqno, current_validator_pubkey )).await.expect("Failed to validate candidate"); + trace!(target: tracing_targets::VALIDATOR, block = %cloned_candidate, "Enqueued validation task"); }, Ok(e) => panic!("Unexpected response from get_validation_status: {:?}", e), Err(e) => panic!("Failed to get validation status: {:?}", e), }, Err(e) => panic!("Failed to enqueue validation task: {:?}", e), } - - if attempts >= max_retries { - warn!(target: tracing_targets::VALIDATOR, "Max retries reached without successful validation for block {:?}.", cloned_candidate); - break; - } } } + iteration += 1; } }); + Ok(ValidatorTaskResult::Void) } @@ -332,6 +370,7 @@ where block_id_short: BlockIdShort, signatures: Vec<([u8; 32], [u8; 64])>, ) -> Result<ValidatorTaskResult> { + trace!(target: tracing_targets::VALIDATOR, block = %block_id_short, "Processing candidate signature response"); // Simplified session retrieval let session = self .validation_state @@ -378,11 +417,17 @@ .await?; } ValidationResult::Invalid => { + trace!(target: tracing_targets::VALIDATOR, block = %block_id_short, "Block is invalid"); self.on_block_validated_event(block, OnValidatedBlockEvent::Invalid) .await?; } - ValidationResult::Insufficient => { - debug!("Insufficient signatures for block {:?}", block_id_short); + ValidationResult::Insufficient(total_valid_weight, valid_weight) => { + trace!( + "Insufficient signatures for block {:?}. Total valid weight: {}. Required weight: {}", + block_id_short, + total_valid_weight, + valid_weight + ); } } } else { @@ -423,6 +468,7 @@ where session_seqno: u32, current_validator_pubkey: everscale_crypto::ed25519::PublicKey, ) -> Result<ValidatorTaskResult> { + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Validating candidate"); let block_id_short = candidate_id.as_short_id(); let validation_state = &self.validation_state; @@ -430,20 +476,23 @@ .get_session(session_seqno) .ok_or(anyhow!("Session not found"))?; + trace!(target: tracing_targets::VALIDATOR, block = %candidate_id, "Getting validators"); let dispatcher = self.get_dispatcher(); - - let receiver = self.state_node_adapter.request_block(candidate_id).await?; + let state_node_adapter = self.state_node_adapter.clone(); let validators = session.validators_without_signatures(&block_id_short); let private_overlay = session.get_overlay().clone(); - let current_signatures = session.get_valid_signatures(&candidate_id.as_short_id()); - let network = self.network.clone(); tokio::spawn(async move { - if let Ok(Some(_)) = receiver.try_recv().await { + let block_from_state = state_node_adapter + .load_block_handle(&candidate_id) + .await + .expect("Failed to load block from state"); + + if block_from_state.is_some() { let result = dispatcher .clone() .enqueue_task(method_to_async_task_closure!( @@ -453,8 +502,7 @@ .await; if let Err(e) = result { - error!(err = %e, "Failed to validate block by state"); - panic!("Failed to validate block by state {e}"); + panic!("Failed to validate block by state {e:?}"); } } else { let payload = SignaturesQuery::create( @@ -464,43 +512,57 @@ ); for validator in validators { - if validator.public_key != current_validator_pubkey { - trace!(target: tracing_targets::VALIDATOR, validator_pubkey=?validator.public_key.as_bytes(), "trying to send request for getting signatures from validator"); - let response = private_overlay - .query( - network.dht_client.network(), - 
&PeerId(validator.public_key.to_bytes()), - Request::from_tl(payload.clone()), + let cloned_private_overlay = private_overlay.clone(); + let cloned_network = network.dht_client.network().clone(); + let cloned_payload = Request::from_tl(payload.clone()); + let cloned_dispatcher = dispatcher.clone(); + tokio::spawn(async move { + if validator.public_key != current_validator_pubkey { + trace!(target: tracing_targets::VALIDATOR, validator_pubkey=?validator.public_key.as_bytes(), "trying to send request for getting signatures from validator"); + + let response = tokio::time::timeout( + NETWORK_TIMEOUT, + cloned_private_overlay.query( + &cloned_network, + &PeerId(validator.public_key.to_bytes()), + cloned_payload, + ), ) .await; - match response { - Ok(response) => { - let response = response.parse_tl::<SignaturesQuery>(); - match response { - Ok(signatures) => { - let enqueue_task_result = dispatcher - .enqueue_task(method_to_async_task_closure!( - process_candidate_signature_response, - signatures.session_seqno, - signatures.block_id_short, - signatures.signatures - )) - .await; - - if let Err(e) = enqueue_task_result { - error!(err = %e, "Failed to enqueue task for processing signatures response"); + + match response { + Ok(Ok(response)) => { + let response = response.parse_tl::<SignaturesQuery>(); + trace!(target: tracing_targets::VALIDATOR, "Received response from overlay"); + match response { + Ok(signatures) => { + let enqueue_task_result = cloned_dispatcher + .enqueue_task(method_to_async_task_closure!( + process_candidate_signature_response, + signatures.session_seqno, + signatures.block_id_short, + signatures.signatures + )) + .await; + trace!(target: tracing_targets::VALIDATOR, "Enqueued task for processing signatures response"); + if let Err(e) = enqueue_task_result { + panic!("Failed to enqueue task for processing signatures response: {e}"); + } + } + Err(e) => { + panic!("Failed convert signatures response to SignaturesQuery: {e}"); } - } - Err(e) => { - error!(err = %e, "Failed convert signatures response to SignaturesQuery"); } } - } - Err(e) => { - error!(err = %e, "Failed to get response from overlay"); + Ok(Err(e)) => { + warn!("Failed to get response from overlay: {e}"); + } + Err(e) => { + warn!("Network request timed out: {e}"); + } } } - } + }); } } }); diff --git a/collator/tests/adapter_tests.rs b/collator/tests/adapter_tests.rs new file mode 100644 index 000000000..558cab83c --- /dev/null +++ b/collator/tests/adapter_tests.rs @@ -0,0 +1,209 @@ +use anyhow::Result; +use async_trait::async_trait; +use everscale_types::models::{BlockId, ShardIdent}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use tycho_block_util::block::{BlockStuff, BlockStuffAug}; +use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; +use tycho_collator::state_node::{ + StateNodeAdapter, StateNodeAdapterStdImpl, StateNodeEventListener, +}; +use tycho_collator::test_utils::prepare_test_storage; +use tycho_collator::types::BlockStuffForSync; +use tycho_core::block_strider::{ + BlockProvider, BlockStrider, PersistentBlockStriderState, PrintSubscriber, +}; +use tycho_storage::Storage; + +struct MockEventListener { + accepted_count: Arc<AtomicUsize>, +} + +#[async_trait] +impl StateNodeEventListener for MockEventListener { + async fn on_block_accepted(&self, _block_id: &BlockId) -> Result<()> { + self.accepted_count.fetch_add(1, Ordering::SeqCst); + Ok(()) + } + async fn on_block_accepted_external(&self, _state: &ShardStateStuff) -> Result<()> { + Ok(()) + } +} + +#[tokio::test] +async fn 
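Each signature query above is now spawned per validator and wrapped in `tokio::time::timeout(NETWORK_TIMEOUT, ...)`, so one slow peer can no longer stall the whole exchange; a timeout surfaces as a warning and the backoff loop retries later. A minimal, self-contained sketch of that timeout pattern (assuming tokio with the `rt`, `time`, and `macros` features; the sleep stands in for the overlay round-trip):

```rust
use std::time::Duration;
use tokio::time::timeout;

#[tokio::main]
async fn main() {
    const NETWORK_TIMEOUT: Duration = Duration::from_millis(1000);

    // A peer that takes far longer than the allowed timeout to answer.
    let slow_peer = async {
        tokio::time::sleep(Duration::from_secs(5)).await;
        "signatures"
    };

    match timeout(NETWORK_TIMEOUT, slow_peer).await {
        Ok(response) => println!("got response: {response}"),
        Err(_elapsed) => println!("request timed out, will retry with backoff"),
    }
}
```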
test_add_and_get_block() { + let (mock_storage, _tmp_dir) = Storage::new_temp().unwrap(); + let counter = Arc::new(AtomicUsize::new(0)); + let listener = Arc::new(MockEventListener { + accepted_count: counter.clone(), + }); + let adapter = StateNodeAdapterStdImpl::create(listener, mock_storage); + + // Test adding a block + + let empty_block = BlockStuff::new_empty(ShardIdent::BASECHAIN, 1); + let block_id = *empty_block.id(); + let block_stuff_aug = BlockStuffAug::loaded(empty_block); + + let block = BlockStuffForSync { + block_id, + block_stuff_aug, + signatures: Default::default(), + prev_blocks_ids: Vec::new(), + top_shard_blocks_ids: Vec::new(), + }; + adapter.accept_block(block).await.unwrap(); + + // Test getting the next block (which should be the one just added) + let next_block = adapter.get_block(&block_id).await; + assert!( + next_block.is_some(), + "Block should be retrieved after being added" + ); +} + +#[tokio::test] +async fn test_storage_accessors() { + let (provider, storage) = prepare_test_storage().await.unwrap(); + + let zerostate_id = BlockId::default(); + + let block_strider = BlockStrider::builder() + .with_provider(provider) + .with_state(PersistentBlockStriderState::new( + zerostate_id, + storage.clone(), + )) + .with_state_subscriber( + MinRefMcStateTracker::default(), + storage.clone(), + PrintSubscriber, + ) + .build(); + + block_strider.run().await.unwrap(); + + let counter = Arc::new(AtomicUsize::new(0)); + let listener = Arc::new(MockEventListener { + accepted_count: counter.clone(), + }); + let adapter = StateNodeAdapterStdImpl::create(listener, storage.clone()); + + let last_mc_block_id = adapter.load_last_applied_mc_block_id().await.unwrap(); + + storage + .shard_state_storage() + .load_state(&last_mc_block_id) + .await + .unwrap(); +} + +#[tokio::test] +async fn test_add_and_get_next_block() { + let (mock_storage, _tmp_dir) = Storage::new_temp().unwrap(); + let counter = Arc::new(AtomicUsize::new(0)); + let listener = Arc::new(MockEventListener { + accepted_count: counter.clone(), + }); + let adapter = StateNodeAdapterStdImpl::create(listener, mock_storage); + + // Test adding a block + let prev_block = BlockStuff::new_empty(ShardIdent::MASTERCHAIN, 1); + let prev_block_id = prev_block.id(); + + let empty_block = BlockStuff::new_empty(ShardIdent::MASTERCHAIN, 2); + let block_stuff_aug = BlockStuffAug::loaded(empty_block); + + let block = BlockStuffForSync { + block_id: *block_stuff_aug.data.id(), + block_stuff_aug, + signatures: Default::default(), + prev_blocks_ids: vec![*prev_block_id], + top_shard_blocks_ids: Vec::new(), + }; + adapter.accept_block(block).await.unwrap(); + + let next_block = adapter.get_next_block(prev_block_id).await; + assert!( + next_block.is_some(), + "Block should be retrieved after being added" + ); +} + +#[tokio::test] +async fn test_add_read_handle_100000_blocks_parallel() { + let (mock_storage, _tmp_dir) = Storage::new_temp().unwrap(); + let counter = Arc::new(AtomicUsize::new(0)); + let listener = Arc::new(MockEventListener { + accepted_count: counter.clone(), + }); + let adapter = Arc::new(StateNodeAdapterStdImpl::create( + listener.clone(), + mock_storage.clone(), + )); + + // Task 1: Adding 100000 blocks + let add_blocks = { + let adapter = adapter.clone(); + tokio::spawn(async move { + for i in 1..=100000 { + let empty_block = BlockStuff::new_empty(ShardIdent::BASECHAIN, i); + let block_stuff_aug = BlockStuffAug::loaded(empty_block.clone()); + + let block = BlockStuffForSync { + block_id: *empty_block.id(), + 
block_stuff_aug, + signatures: Default::default(), + prev_blocks_ids: Vec::new(), + top_shard_blocks_ids: Vec::new(), + }; + let accept_result = adapter.accept_block(block).await; + assert!(accept_result.is_ok(), "Block {} should be accepted", i); + } + }) + }; + + // Task 2: Retrieving and handling 100000 blocks + let handle_blocks = { + let adapter = adapter.clone(); + tokio::spawn(async move { + for i in 1..=100000 { + let block_id = BlockId { + shard: ShardIdent::new_full(0), + seqno: i, + root_hash: Default::default(), + file_hash: Default::default(), + }; + let next_block = adapter.get_block(&block_id).await; + assert!( + next_block.is_some(), + "Block {} should be retrieved after being added", + i + ); + + // TODO + + // let block_stuff = BlockStuffAug::loaded(BlockStuff::with_block( + // block_id.clone(), + // empty_block.clone(), + // )); + + // let handle_block = adapter.handle_block(&block_stuff, None).await; + // assert!( + // handle_block.is_ok(), + // "Block {} should be handled after being added", + // i + // ); + } + }) + }; + + // Await both tasks to complete + let _ = tokio::join!(handle_blocks, add_blocks); + + assert_eq!( + counter.load(Ordering::SeqCst), + 100000, + "100000 blocks should be accepted" + ); +} diff --git a/collator/tests/collation_tests.rs b/collator/tests/collation_tests.rs index 06036da63..5cd91e92a 100644 --- a/collator/tests/collation_tests.rs +++ b/collator/tests/collation_tests.rs @@ -1,21 +1,61 @@ +use everscale_types::models::{BlockId, GlobalCapability}; + +use tycho_block_util::state::MinRefMcStateTracker; +use tycho_collator::test_utils::prepare_test_storage; +use tycho_collator::validator_test_impl::ValidatorProcessorTestImpl; use tycho_collator::{ + manager::CollationManager, mempool::{MempoolAdapterBuilder, MempoolAdapterBuilderStdImpl, MempoolAdapterStdImpl}, state_node::{StateNodeAdapterBuilder, StateNodeAdapterBuilderStdImpl}, test_utils::try_init_test_tracing, types::CollationConfig, - validator_test_impl::ValidatorProcessorTestImpl, }; +use tycho_core::block_strider::{BlockStrider, PersistentBlockStriderState, PrintSubscriber}; +/// run: `RUST_BACKTRACE=1 cargo test -p tycho-collator --features test --test collation_tests -- --nocapture` #[tokio::test] async fn test_collation_process_on_stubs() { try_init_test_tracing(tracing_subscriber::filter::LevelFilter::TRACE); + let (provider, storage) = prepare_test_storage().await.unwrap(); + + let zerostate_id = BlockId::default(); + + let block_strider = BlockStrider::builder() + .with_provider(provider) + .with_state(PersistentBlockStriderState::new( + zerostate_id, + storage.clone(), + )) + .with_state_subscriber( + MinRefMcStateTracker::default(), + storage.clone(), + PrintSubscriber, + ) + .build(); + + block_strider.run().await.unwrap(); + + let mpool_adapter_builder = MempoolAdapterBuilderStdImpl::<MempoolAdapterStdImpl>::new(); + let state_node_adapter_builder = StateNodeAdapterBuilderStdImpl::new(storage.clone()); + + let mut rnd = rand::thread_rng(); + let node_1_keypair = everscale_crypto::ed25519::KeyPair::generate(&mut rnd); + let config = CollationConfig { - key_pair: everscale_crypto::ed25519::KeyPair::generate(&mut rand::thread_rng()), + key_pair: node_1_keypair, mc_block_min_interval_ms: 10000, + max_mc_block_delta_from_bc_to_await_own: 2, + supported_block_version: 50, + supported_capabilities: supported_capabilities(), + max_collate_threads: 1, + + #[cfg(feature = "test")] + test_validators_keypairs: vec![ + node_1_keypair, + everscale_crypto::ed25519::KeyPair::generate(&mut rnd), + ], }; - let 
mpool_adapter_builder = MempoolAdapterBuilderStdImpl::<MempoolAdapterStdImpl>::new(); - let state_node_adapter_builder = StateNodeAdapterBuilderStdImpl::new(); tracing::info!("Trying to start CollationManager"); @@ -32,7 +72,28 @@ async fn test_collation_process_on_stubs() { node_network, ); + let state_node_adapter = _manager.get_state_node_adapter(); + + let block_strider = BlockStrider::builder() + .with_provider(state_node_adapter.clone()) + .with_state(PersistentBlockStriderState::new( + zerostate_id, + storage.clone(), + )) + .with_state_subscriber( + MinRefMcStateTracker::default(), + storage.clone(), + state_node_adapter, + ) + .build(); + + let strider_handle = block_strider.run(); + tokio::select! { + _ = strider_handle => { + println!(); + println!("block_strider finished"); + }, _ = tokio::signal::ctrl_c() => { println!(); println!("Ctrl-C received, shutting down the test"); @@ -43,3 +104,28 @@ } } } + +fn supported_capabilities() -> u64 { + let caps = GlobalCapability::CapCreateStatsEnabled as u64 + | GlobalCapability::CapBounceMsgBody as u64 + | GlobalCapability::CapReportVersion as u64 + | GlobalCapability::CapShortDequeue as u64 + | GlobalCapability::CapRemp as u64 + | GlobalCapability::CapInitCodeHash as u64 + | GlobalCapability::CapOffHypercube as u64 + | GlobalCapability::CapFixTupleIndexBug as u64 + | GlobalCapability::CapFastStorageStat as u64 + | GlobalCapability::CapMyCode as u64 + | GlobalCapability::CapCopyleft as u64 + | GlobalCapability::CapFullBodyInBounced as u64 + | GlobalCapability::CapStorageFeeToTvm as u64 + | GlobalCapability::CapWorkchains as u64 + | GlobalCapability::CapStcontNewFormat as u64 + | GlobalCapability::CapFastStorageStatBugfix as u64 + | GlobalCapability::CapResolveMerkleCell as u64 + | GlobalCapability::CapFeeInGasUnits as u64 + | GlobalCapability::CapBounceAfterFailedAction as u64 + | GlobalCapability::CapSuspendedList as u64 + | GlobalCapability::CapsTvmBugfixes2022 as u64; + caps +} diff --git a/collator/tests/validator_tests.rs b/collator/tests/validator_tests.rs new file mode 100644 index 000000000..568d9a2f7 --- /dev/null +++ b/collator/tests/validator_tests.rs @@ -0,0 +1,400 @@ +use std::net::Ipv4Addr; +use std::sync::Arc; + +use async_trait::async_trait; +use std::time::Duration; + +use anyhow::Result; +use everscale_crypto::ed25519; +use everscale_crypto::ed25519::KeyPair; +use everscale_types::models::{BlockId, ValidatorDescription}; +use rand::prelude::ThreadRng; +use tokio::sync::{Mutex, Notify}; + +use tycho_block_util::block::ValidatorSubsetInfo; +use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; +use tycho_collator::state_node::{ + StateNodeAdapterBuilder, StateNodeAdapterBuilderStdImpl, StateNodeEventListener, +}; +use tycho_collator::test_utils::{prepare_test_storage, try_init_test_tracing}; +use tycho_collator::types::{CollationSessionInfo, OnValidatedBlockEvent, ValidatorNetwork}; +use tycho_collator::validator::state::{ValidationState, ValidationStateStdImpl}; +use tycho_collator::validator::types::ValidationSessionInfo; +use tycho_collator::validator::validator::{Validator, ValidatorEventListener, ValidatorStdImpl}; +use tycho_collator::validator::validator_processor::ValidatorProcessorStdImpl; +use tycho_core::block_strider::{BlockStrider, PersistentBlockStriderState, PrintSubscriber}; +use tycho_network::{ + DhtClient, DhtConfig, DhtService, Network, OverlayService, PeerId, PeerResolver, Router, +}; +use tycho_storage::Storage; + +pub struct TestValidatorEventListener { 
validated_blocks: Mutex<Vec<BlockId>>, + notify: Arc<Notify>, + expected_notifications: Mutex<u32>, + received_notifications: Mutex<u32>, +} + +impl TestValidatorEventListener { + pub fn new(expected_count: u32) -> Arc<Self> { + Arc::new(Self { + validated_blocks: Mutex::new(vec![]), + notify: Arc::new(Notify::new()), + expected_notifications: Mutex::new(expected_count), + received_notifications: Mutex::new(0), + }) + } + + pub async fn increment_and_check(&self) { + let mut received = self.received_notifications.lock().await; + *received += 1; + if *received == *self.expected_notifications.lock().await { + println!( + "received: {}, expected: {}", + *received, + *self.expected_notifications.lock().await + ); + self.notify.notify_one(); + } + } +} + +#[async_trait] +impl ValidatorEventListener for TestValidatorEventListener { + async fn on_block_validated( + &self, + block_id: BlockId, + _event: OnValidatedBlockEvent, + ) -> anyhow::Result<()> { + let mut validated_blocks = self.validated_blocks.lock().await; + validated_blocks.push(block_id); + self.increment_and_check().await; + Ok(()) + } +} + +#[async_trait] +impl StateNodeEventListener for TestValidatorEventListener { + async fn on_block_accepted(&self, _block_id: &BlockId) -> Result<()> { + unimplemented!("Not implemented"); + } + + async fn on_block_accepted_external(&self, _state: &ShardStateStuff) -> Result<()> { + unimplemented!("Not implemented"); + } +} + +struct Node { + network: Network, + keypair: KeyPair, + overlay_service: OverlayService, + dht_client: DhtClient, + peer_resolver: PeerResolver, +} + +impl Node { + fn new(key: &ed25519::SecretKey) -> Self { + let keypair = ed25519::KeyPair::from(key); + let local_id = PeerId::from(keypair.public_key); + + let (dht_tasks, dht_service) = DhtService::builder(local_id) + .with_config(DhtConfig { + local_info_announce_period: Duration::from_secs(1), + local_info_announce_period_max_jitter: Duration::from_secs(1), + routing_table_refresh_period: Duration::from_secs(1), + routing_table_refresh_period_max_jitter: Duration::from_secs(1), + ..Default::default() + }) + .build(); + + let (overlay_tasks, overlay_service) = OverlayService::builder(local_id) + .with_dht_service(dht_service.clone()) + .build(); + + let router = Router::builder() + .route(overlay_service.clone()) + .route(dht_service.clone()) + .build(); + + let network = Network::builder() + .with_private_key(key.to_bytes()) + .with_service_name("test-service") + .build((Ipv4Addr::LOCALHOST, 0), router) + .unwrap(); + + let dht_client = dht_service.make_client(&network); + let peer_resolver = dht_service.make_peer_resolver().build(&network); + + overlay_tasks.spawn(&network); + dht_tasks.spawn(&network); + + Self { + network, + keypair, + overlay_service, + dht_client, + peer_resolver, + } + } +} + +fn make_network(node_count: usize) -> Vec<Node> { + let keys = (0..node_count) + .map(|_| ed25519::SecretKey::generate(&mut rand::thread_rng())) + .collect::<Vec<_>>(); + let nodes = keys.iter().map(Node::new).collect::<Vec<_>>(); + let common_peer_info = nodes.first().unwrap().network.sign_peer_info(0, u32::MAX); + for node in &nodes { + node.dht_client + .add_peer(Arc::new(common_peer_info.clone())) + .unwrap(); + } + nodes +} + +#[tokio::test] +async fn test_validator_accept_block_by_state() -> anyhow::Result<()> { + let test_listener = TestValidatorEventListener::new(1); + let _state_node_event_listener: Arc<dyn StateNodeEventListener> = test_listener.clone(); + + let (provider, storage) = prepare_test_storage().await.unwrap(); + + let zerostate_id = BlockId::default(); + + let block_strider = 
BlockStrider::builder() + .with_provider(provider) + .with_state(PersistentBlockStriderState::new( + zerostate_id, + storage.clone(), + )) + .with_state_subscriber( + MinRefMcStateTracker::default(), + storage.clone(), + PrintSubscriber, + ) + .build(); + + block_strider.run().await.unwrap(); + + let state_node_adapter = + Arc::new(StateNodeAdapterBuilderStdImpl::new(storage.clone()).build(test_listener.clone())); + let _validation_state = ValidationStateStdImpl::new(); + + let random_secret_key = ed25519::SecretKey::generate(&mut rand::thread_rng()); + let keypair = ed25519::KeyPair::from(&random_secret_key); + let local_id = PeerId::from(keypair.public_key); + let (_, _overlay_service) = OverlayService::builder(local_id).build(); + + let (_overlay_tasks, overlay_service) = OverlayService::builder(local_id).build(); + + let router = Router::builder().route(overlay_service.clone()).build(); + let network = Network::builder() + .with_private_key(random_secret_key.to_bytes()) + .with_service_name("test-service") + .build((Ipv4Addr::LOCALHOST, 0), router) + .unwrap(); + + let (_, dht_service) = DhtService::builder(local_id) + .with_config(DhtConfig { + local_info_announce_period: Duration::from_secs(1), + local_info_announce_period_max_jitter: Duration::from_secs(1), + routing_table_refresh_period: Duration::from_secs(1), + routing_table_refresh_period_max_jitter: Duration::from_secs(1), + ..Default::default() + }) + .build(); + + let dht_client = dht_service.make_client(&network); + let peer_resolver = dht_service.make_peer_resolver().build(&network); + + let validator_network = ValidatorNetwork { + overlay_service, + peer_resolver, + dht_client, + }; + + let validator = ValidatorStdImpl::<ValidatorProcessorStdImpl<_>, _>::create( + test_listener.clone(), + state_node_adapter, + validator_network, + ); + + let v_keypair = KeyPair::generate(&mut ThreadRng::default()); + + let validator_description = ValidatorDescription { + public_key: v_keypair.public_key.to_bytes().into(), + weight: 1, + adnl_addr: None, + mc_seqno_since: 0, + prev_total_weight: 0, + }; + + let validator_description2 = ValidatorDescription { + public_key: KeyPair::generate(&mut ThreadRng::default()) + .public_key + .to_bytes() + .into(), + weight: 3, + adnl_addr: None, + mc_seqno_since: 0, + prev_total_weight: 0, + }; + + let block_id = storage.node_state().load_last_mc_block_id().unwrap(); + + let block_handle = storage.block_handle_storage().load_handle(&block_id); + assert!(block_handle.is_some(), "Block handle not found in storage."); + + let validators = ValidatorSubsetInfo { + validators: vec![validator_description, validator_description2], + short_hash: 0, + }; + let keypair = KeyPair::generate(&mut ThreadRng::default()); + let collator_session_info = Arc::new(CollationSessionInfo::new(0, validators, Some(keypair))); + + let validation_session = + Arc::new(ValidationSessionInfo::try_from(collator_session_info.clone()).unwrap()); + + validator + .enqueue_add_session(validation_session) + .await + .unwrap(); + + validator + .enqueue_candidate_validation(block_id, collator_session_info.seqno(), v_keypair) + .await + .unwrap(); + + test_listener.notify.notified().await; + let validated_blocks = test_listener.validated_blocks.lock().await; + assert_eq!( + validated_blocks.len() as u32, + 1, + "Expected each validator to validate the block once." 
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn test_validator_accept_block_by_network() -> Result<()> {
+    try_init_test_tracing(tracing_subscriber::filter::LevelFilter::DEBUG);
+    tycho_util::test::init_logger("test_validator_accept_block_by_network");
+
+    let mut tmp_dirs = Vec::new();
+
+    let network_nodes = make_network(13);
+    let blocks_amount = 1000;
+    let sessions = 1;
+
+    let mut validators = vec![];
+    let mut listeners = vec![]; // Track listeners for later validation
+
+    for node in network_nodes {
+        // Create a unique listener for each validator
+        let test_listener = TestValidatorEventListener::new(blocks_amount * sessions);
+        listeners.push(test_listener.clone());
+
+        let (storage, tmp_dir) = Storage::new_temp()?;
+        tmp_dirs.push(tmp_dir);
+
+        let state_node_adapter =
+            Arc::new(StateNodeAdapterBuilderStdImpl::new(storage).build(test_listener.clone()));
+
+        let network = ValidatorNetwork {
+            overlay_service: node.overlay_service.clone(),
+            dht_client: node.dht_client.clone(),
+            peer_resolver: node.peer_resolver.clone(),
+        };
+        let validator = ValidatorStdImpl::<ValidatorProcessorStdImpl<ValidationStateStdImpl>, _>::create(
+            test_listener.clone(),
+            state_node_adapter,
+            network,
+        );
+        validators.push((validator, node));
+    }
+
+    let mut validators_descriptions = vec![];
+    for (_, node) in &validators {
+        let peer_id = node.network.peer_id();
+        validators_descriptions.push(ValidatorDescription {
+            public_key: (*peer_id.as_bytes()).into(),
+            weight: 1,
+            adnl_addr: None,
+            mc_seqno_since: 0,
+            prev_total_weight: 0,
+        });
+    }
+
+    let validators_subset_info = ValidatorSubsetInfo {
+        validators: validators_descriptions,
+        short_hash: 0,
+    };
+
+    for session in 1..=sessions {
+        let blocks = create_blocks(blocks_amount);
+
+        for (validator, node) in &validators {
+            // Each session is registered with the node's own keypair.
+            let collator_session_info = Arc::new(CollationSessionInfo::new(
+                session,
+                validators_subset_info.clone(),
+                Some(node.keypair),
+            ));
+
+            // A validator must know about the session before any candidate
+            // from that session can be enqueued.
+            let validation_session =
+                Arc::new(ValidationSessionInfo::try_from(collator_session_info.clone()).unwrap());
+            validator
+                .enqueue_add_session(validation_session)
+                .await
+                .unwrap();
+        }
+
+        let mut i = 0;
+        for block in blocks.iter() {
+            i += 1;
+            for (validator, node) in &validators {
+                let collator_session_info = Arc::new(CollationSessionInfo::new(
+                    session,
+                    validators_subset_info.clone(),
+                    Some(node.keypair),
+                ));
+
+                // On every 10th block, pause briefly before each enqueue
+                // to let the validation queues drain.
+                if i % 10 == 0 {
+                    tokio::time::sleep(Duration::from_millis(10)).await;
+                }
+                validator
+                    .enqueue_candidate_validation(
+                        *block,
+                        collator_session_info.seqno(),
+                        *collator_session_info.current_collator_keypair().unwrap(),
+                    )
+                    .await
+                    .unwrap();
+            }
+        }
+    }
+
+    for listener in listeners {
+        listener.notify.notified().await;
+        let validated_blocks = listener.validated_blocks.lock().await;
+        assert_eq!(
+            validated_blocks.len() as u32,
+            sessions * blocks_amount,
+            "Expected each validator to validate every block in every session exactly once."
+        );
+    }
+    Ok(())
+}
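The manually incremented `i` counter in the enqueue loop above is easy to get wrong; the same pacing can be expressed with `enumerate` while keeping the one-based rhythm. A runnable sketch of just the throttling pattern (the enqueue call is elided):

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    let blocks: Vec<u32> = (0..100).collect();
    for (i, _block) in blocks.iter().enumerate() {
        // `i` is zero-based, so `i + 1` reproduces the one-based counter above.
        if (i + 1) % 10 == 0 {
            tokio::time::sleep(Duration::from_millis(10)).await;
        }
        // enqueue_candidate_validation(*_block, ...) would go here.
    }
}
```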
+fn create_blocks(amount: u32) -> Vec<BlockId> {
+    let mut blocks = vec![];
+    for i in 0..amount {
+        blocks.push(BlockId {
+            shard: Default::default(),
+            seqno: i,
+            root_hash: Default::default(),
+            file_hash: Default::default(),
+        });
+    }
+    blocks
+}
diff --git a/core/Cargo.toml b/core/Cargo.toml
index c56ae6b77..3dcbbdd4e 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -36,11 +36,12 @@ tycho-util = { workspace = true }
 
 bytesize = { workspace = true }
 everscale-crypto = { workspace = true }
 tycho-util = { workspace = true, features = ["test"] }
+tycho-storage = { workspace = true, features = ["test"] }
 tempfile = { workspace = true }
 tracing-test = { workspace = true }
 
-[lints]
-workspace = true
-
 [features]
 test = []
+
+[lints]
+workspace = true
diff --git a/core/src/block_strider/mod.rs b/core/src/block_strider/mod.rs
index 208e0b333..618b014c8 100644
--- a/core/src/block_strider/mod.rs
+++ b/core/src/block_strider/mod.rs
@@ -8,7 +8,9 @@ use tycho_block_util::state::MinRefMcStateTracker;
 use tycho_storage::Storage;
 use tycho_util::FastHashMap;
 
-pub use self::provider::{BlockProvider, BlockchainBlockProvider, BlockchainBlockProviderConfig};
+pub use self::provider::{
+    BlockProvider, BlockchainBlockProvider, BlockchainBlockProviderConfig, OptionalBlockStuff,
+};
 pub use self::state::{BlockStriderState, PersistentBlockStriderState, TempBlockStriderState};
 pub use self::state_applier::ShardStateApplier;
 pub use self::subscriber::{
@@ -18,6 +20,8 @@ pub use self::subscriber::{
 
 #[cfg(any(test, feature = "test"))]
 pub use self::provider::ArchiveBlockProvider;
+#[cfg(any(test, feature = "test"))]
+pub use self::subscriber::test::PrintSubscriber;
 
 mod provider;
 mod state;
diff --git a/core/src/block_strider/subscriber.rs b/core/src/block_strider/subscriber.rs
index 887545948..93a3489da 100644
--- a/core/src/block_strider/subscriber.rs
+++ b/core/src/block_strider/subscriber.rs
@@ -1,4 +1,5 @@
 use std::future::Future;
+use std::sync::Arc;
 
 use anyhow::Result;
 use everscale_types::models::*;
@@ -29,6 +30,14 @@ impl<T: BlockSubscriber> BlockSubscriber for Box<T> {
     }
 }
 
+impl<T: BlockSubscriber> BlockSubscriber for Arc<T> {
+    type HandleBlockFut<'a> = T::HandleBlockFut<'a>;
+
+    fn handle_block<'a>(&'a self, cx: &'a BlockSubscriberContext) -> Self::HandleBlockFut<'a> {
+        <T as BlockSubscriber>::handle_block(self, cx)
+    }
+}
+
 pub trait BlockSubscriberExt: Sized {
     fn chain<T: BlockSubscriber>(self, other: T) -> ChainSubscriber<Self, T>;
 }
@@ -65,6 +74,14 @@ impl<T: StateSubscriber> StateSubscriber for Box<T> {
     }
 }
 
+impl<T: StateSubscriber> StateSubscriber for Arc<T> {
+    type HandleStateFut<'a> = T::HandleStateFut<'a>;
+
+    fn handle_state<'a>(&'a self, cx: &'a StateSubscriberContext) -> Self::HandleStateFut<'a> {
+        <T as StateSubscriber>::handle_state(self, cx)
+    }
+}
+
 pub trait StateSubscriberExt: Sized {
     fn chain<T: StateSubscriber>(self, other: T) -> ChainSubscriber<Self, T>;
 }
diff --git a/network/Cargo.toml b/network/Cargo.toml
index fe309f7fd..b7d0f4791 100644
--- a/network/Cargo.toml
+++ b/network/Cargo.toml
@@ -55,7 +55,7 @@ serde_json = "1.0"
 tokio = { version = "1", features = ["rt-multi-thread"] }
 tracing-appender = "0.2.3"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
-tracing-test = "0.2"
+tracing-test = { workspace = true }
 
 tycho-util = { workspace = true, features = ["test"] }
 
diff --git a/storage/Cargo.toml b/storage/Cargo.toml
index 635d0bcaf..16e02c017 100644
--- a/storage/Cargo.toml
+++ b/storage/Cargo.toml
@@ -30,6 +30,7 @@ serde = { workspace = true }
 sha2 = { workspace = true }
 smallvec = { workspace = true }
 sysinfo = { workspace = true }
+tempfile = { workspace = true, optional = true }
 thiserror = { workspace = true }
 tokio = { workspace = true, features = ["rt"] }
 tracing = { workspace = true }
@@ -42,6 +43,7 @@ tycho-util = { workspace = true }
 
 [dev-dependencies]
 base64 = { workspace = true }
+bytesize = { workspace = true }
 serde_json = { workspace = true }
 tracing-appender = { workspace = true }
 tracing-subscriber = { workspace = true }
@@ -49,5 +51,8 @@ tracing-test = { workspace = true }
 tempfile = { workspace = true }
 tokio = { version = "1", features = ["full"] }
 
+[features]
+test = ["dep:tempfile"]
+
 [lints]
 workspace = true
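The `Arc<T>` blanket impls added in the subscriber.rs hunk above let one subscriber instance be shared between several consumers (as the tests do with their `Arc`-wrapped listeners) without unwrapping it first. The general shape of that pattern, as a runnable sketch with illustrative names:

```rust
use std::sync::Arc;

trait Handler {
    fn handle(&self, value: u32);
}

// Blanket impl mirroring the new Arc<T> impls: any shared handler
// is itself a handler, so clones of one Arc can be handed out freely.
impl<T: Handler> Handler for Arc<T> {
    fn handle(&self, value: u32) {
        (**self).handle(value)
    }
}

struct Printer;

impl Handler for Printer {
    fn handle(&self, value: u32) {
        println!("got {value}");
    }
}

fn main() {
    let shared = Arc::new(Printer);
    let a = shared.clone();
    let b = shared;
    a.handle(1); // both clones dispatch to the same underlying Printer
    b.handle(2);
}
```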
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
index e9303641d..974d0361a 100644
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
@@ -1,6 +1,8 @@
 use std::path::PathBuf;
 use std::sync::Arc;
 
+use anyhow::Result;
+
 pub use self::db::*;
 pub use self::models::*;
 pub use self::store::*;
@@ -22,11 +24,7 @@ pub struct Storage {
 }
 
 impl Storage {
-    pub fn new(
-        db: Arc<Db>,
-        file_db_path: PathBuf,
-        max_cell_cache_size_bytes: u64,
-    ) -> anyhow::Result<Self> {
+    pub fn new(db: Arc<Db>, file_db_path: PathBuf, max_cell_cache_size_bytes: u64) -> Result<Self> {
         let files_dir = FileDb::new(file_db_path);
 
         let block_handle_storage = Arc::new(BlockHandleStorage::new(db.clone()));
@@ -61,6 +59,34 @@
         })
     }
 
+    /// Creates a new temporary storage with a minimal ("potato") config.
+    ///
+    /// NOTE: The temp dir must live longer than the storage,
+    /// otherwise the compaction filter will not work.
+    #[cfg(any(test, feature = "test"))]
+    pub fn new_temp() -> Result<(Self, tempfile::TempDir)> {
+        use bytesize::ByteSize;
+
+        let tmp_dir = tempfile::tempdir()?;
+        let root_path = tmp_dir.path();
+
+        // Init rocksdb
+        let db_options = DbOptions {
+            rocksdb_lru_capacity: ByteSize::kb(1024),
+            cells_cache_size: ByteSize::kb(1024),
+        };
+        let db = Db::open(root_path.join("db_storage"), db_options)?;
+
+        // Init storage
+        let storage = Storage::new(
+            db,
+            root_path.join("file_storage"),
+            db_options.cells_cache_size.as_u64(),
+        )?;
+
+        Ok((storage, tmp_dir))
+    }
+
     pub fn runtime_storage(&self) -> &RuntimeStorage {
         &self.inner.runtime_storage
     }
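A minimal usage sketch for `new_temp`, using only names from this diff: the returned `TempDir` is what keeps the directory on disk, so it must outlive the `Storage` that points into it (the network test above achieves this by pushing the dirs into a long-lived `tmp_dirs` vector).

```rust
// Sketch only: assumes a dev-dependency on tycho-storage with the new
// `test` feature enabled.
use tycho_storage::Storage;

#[tokio::test]
async fn storage_smoke() -> anyhow::Result<()> {
    let (storage, tmp_dir) = Storage::new_temp()?;

    // ... exercise the storage ...
    let _ = storage.runtime_storage();

    // Drop the storage first, then the directory; dropping `tmp_dir`
    // earlier would delete files RocksDB still has open.
    drop(storage);
    drop(tmp_dir);
    Ok(())
}
```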