diff --git a/Cargo.lock b/Cargo.lock
index 60965708e..00c37aece 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2950,7 +2950,7 @@ dependencies = [
 [[package]]
 name = "ton_executor"
 version = "2.0.0"
-source = "git+https://github.com/broxus/ton-labs-executor.git?branch=new_cells#923a1061ef70448c487a4ffdd91aa859d3e29335"
+source = "git+https://github.com/broxus/ton-labs-executor.git?branch=new_cells#973e3d0ecb4f07ac0ffcbdbda2c1a9825c9fe627"
 dependencies = [
  "ahash",
  "anyhow",
diff --git a/collator/src/collator/build_block.rs b/collator/src/collator/build_block.rs
index cca8477ac..d824f7270 100644
--- a/collator/src/collator/build_block.rs
+++ b/collator/src/collator/build_block.rs
@@ -194,7 +194,7 @@ impl CollatorStdImpl {
             end_lt: collation_data.next_lt,
             gen_validator_list_hash_short: self.collation_session.collators().short_hash,
             gen_catchain_seqno: self.collation_session.seqno(),
-            min_ref_mc_seqno: collation_data.min_ref_mc_seqno()?,
+            min_ref_mc_seqno: collation_data.min_ref_mc_seqno,
             prev_key_block_seqno: mc_data.prev_key_block_seqno(),
             master_ref: master_ref.as_ref().map(Lazy::new).transpose()?,
             ..Default::default()
@@ -236,7 +236,8 @@ impl CollatorStdImpl {
             processed_upto: Lazy::new(&collation_data.processed_upto)?,
             before_split: new_block_info.before_split,
             accounts: Lazy::new(&shard_accounts)?,
-            overload_history: 0,
+            overload_history: prev_shard_data.gas_used()
+                + collation_data.block_limit.gas_used as u64,
             underload_history: 0,
             total_balance: value_flow.to_next_block.clone(),
             total_validator_fees: prev_shard_data.total_validator_fees().clone(),
diff --git a/collator/src/collator/do_collate.rs b/collator/src/collator/do_collate.rs
index 7b90ac0d4..16193ef6b 100644
--- a/collator/src/collator/do_collate.rs
+++ b/collator/src/collator/do_collate.rs
@@ -13,7 +13,9 @@ use tycho_util::metrics::HistogramGuard;
 use tycho_util::time::now_millis;
 use tycho_util::FastHashMap;
 
-use super::types::{CachedMempoolAnchor, SpecialOrigin};
+use super::types::{
+    BlockCollationDataBuilder, BlockLimitsLevel, CachedMempoolAnchor, SpecialOrigin,
+};
 use super::CollatorStdImpl;
 use crate::collator::execution_manager::ExecutionManager;
 use crate::collator::types::{
@@ -59,41 +61,33 @@ impl CollatorStdImpl {
         let rand_seed = HashBytes::from_slice(hash_bytes.as_slice());
         tracing::trace!(target: tracing_targets::COLLATOR, "rand_seed from chain time: {}", rand_seed);
 
+        let is_masterchain = self.shard_id.is_masterchain();
         // prepare block collation data
         // STUB: consider split/merge in future for taking prev_block_id
         let prev_block_id = prev_shard_data.blocks_ids()[0];
-        let mut collation_data = Box::new(BlockCollationData::default());
-        collation_data.block_id_short = BlockIdShort {
+        let block_id_short = BlockIdShort {
             shard: prev_block_id.shard,
             seqno: prev_block_id.seqno + 1,
         };
-        collation_data.rand_seed = rand_seed;
-        collation_data.update_ref_min_mc_seqno(mc_data.mc_state_stuff().state().seqno);
-        collation_data.gen_utime = (next_chain_time / 1000) as u32;
-        collation_data.gen_utime_ms = (next_chain_time % 1000) as u16;
-        collation_data.start_lt = Self::calc_start_lt(mc_data, prev_shard_data, &collation_data)?;
-        collation_data.next_lt = collation_data.start_lt + 1;
-
-        collation_data.processed_upto = prev_shard_data.processed_upto().clone();
-        tracing::debug!(target: tracing_targets::COLLATOR, "initial processed_upto.externals = {:?}",
-            collation_data.processed_upto.externals,
+        let block_limits = mc_data.config().get_block_limits(is_masterchain)?;
+        tracing::debug!(target: tracing_targets::COLLATOR,
+            "Block limits: {:?}",
+            block_limits
         );
-        // show intenals proccessed upto
-        collation_data
-            .processed_upto
-            .internals
-            .iter()
-            .for_each(|result| {
-                let (shard_ident, processed_upto) = result.unwrap();
-                tracing::debug!(target: tracing_targets::COLLATOR,
-                    "initial processed_upto.internals for shard {:?}: {:?}",
-                    shard_ident, processed_upto,
-                );
-            });
+        // TODO: get from anchor
+        let created_by = HashBytes::default();
+        let mut collation_data_builder = BlockCollationDataBuilder::new(
+            block_id_short,
+            rand_seed,
+            mc_data.mc_state_stuff().state().seqno,
+            next_chain_time,
+            prev_shard_data.processed_upto().clone(),
+            created_by,
+        );
 
         // init ShardHashes descriptions for master
-        if self.shard_id.is_masterchain() {
+        if is_masterchain {
             let shards = prev_shard_data.observable_states()[0]
                 .shards()?
                 .iter()
@@ -104,17 +98,43 @@ impl CollatorStdImpl {
                 })
                 .collect::<FastHashMap<_, _>>();
 
-            collation_data.set_shards(shards);
+            collation_data_builder.set_shards(shards);
 
             if let Some(top_shard_blocks_info) = top_shard_blocks_info {
                 self.import_new_shard_top_blocks_for_masterchain(
                     mc_data.config(),
-                    &mut collation_data,
+                    &mut collation_data_builder,
                     top_shard_blocks_info,
                 )?;
             }
         }
 
+        let start_lt = Self::calc_start_lt(
+            mc_data,
+            prev_shard_data,
+            is_masterchain,
+            collation_data_builder.shards_max_end_lt,
+        )?;
+
+        let mut collation_data = Box::new(collation_data_builder.build(start_lt, block_limits));
+
+        tracing::debug!(target: tracing_targets::COLLATOR, "initial processed_upto.externals = {:?}",
+            collation_data.processed_upto.externals,
+        );
+
+        // show internals processed upto
+        collation_data
+            .processed_upto
+            .internals
+            .iter()
+            .for_each(|result| {
+                let (shard_ident, processed_upto) = result.unwrap();
+                tracing::debug!(target: tracing_targets::COLLATOR,
+                    "initial processed_upto.internals for shard {:?}: {:?}",
+                    shard_ident, processed_upto,
+                );
+            });
+
         // compute created / minted / recovered / from_prev_block
         self.update_value_flow(mc_data, prev_shard_data, &mut collation_data)?;
 
@@ -167,7 +187,7 @@ impl CollatorStdImpl {
 
         // execute tick transaction and special transactions (mint, recover)
         let execute_tick_elapsed;
-        if self.shard_id.is_masterchain() {
+        if is_masterchain {
             let histogram =
                 HistogramGuard::begin_with_labels("tycho_do_collate_execute_tick_time", labels);
 
@@ -204,13 +224,21 @@ impl CollatorStdImpl {
         loop {
             let mut timer = Instant::now();
 
+            let soft_level_reached = collation_data.block_limit.reached(BlockLimitsLevel::Soft);
+            if soft_level_reached {
+                tracing::debug!(target: tracing_targets::COLLATOR,
+                    "STUB: soft block limit reached: {:?}",
+                    collation_data.block_limit,
+                );
+            }
             let mut executed_internal_messages = vec![];
             let mut internal_messages_sources = FastHashMap::default();
             // build messages set
             let mut msgs_set: Vec<Box<ParsedMessage>> = vec![];
 
             // 1. First try to read min externals amount
-            let mut ext_msgs = if self.has_pending_externals {
+
+            let mut ext_msgs = if !soft_level_reached && self.has_pending_externals {
                 self.read_next_externals(min_externals_per_set, &mut collation_data)?
             } else {
                 vec![]
             };
@@ -260,7 +288,7 @@ impl CollatorStdImpl {
             // If not enough existing internals to fill the set then try read more externals
             msgs_set.append(&mut ext_msgs);
             remaining_capacity = max_messages_per_set - msgs_set.len();
-            if remaining_capacity > 0 && self.has_pending_externals {
+            if remaining_capacity > 0 && self.has_pending_externals && !soft_level_reached {
                 ext_msgs = self.read_next_externals(remaining_capacity, &mut collation_data)?;
                 tracing::debug!(target: tracing_targets::COLLATOR,
                     ext_count = ext_msgs.len(),
@@ -388,6 +416,7 @@ impl CollatorStdImpl {
                 }
 
                 collation_data.next_lt = exec_manager.min_next_lt();
+                collation_data.block_limit.lt_current = collation_data.next_lt;
             }
 
             msgs_set_offset = tick.new_offset;
@@ -409,6 +438,15 @@ impl CollatorStdImpl {
                 if msgs_set_offset == msgs_set_len {
                     msgs_set_full_processed = true;
                 }
+
+                if collation_data.block_limit.reached(BlockLimitsLevel::Hard) {
+                    tracing::debug!(target: tracing_targets::COLLATOR,
+                        "STUB: block limit reached: {:?}",
+                        collation_data.block_limit,
+                    );
+                    block_limits_reached = true;
+                    break;
+                }
             }
 
             metrics::gauge!("tycho_do_collate_exec_ticks_per_msgs_set", labels)
@@ -416,15 +454,6 @@ impl CollatorStdImpl {
 
             timer = std::time::Instant::now();
 
-            // HACK: temporary always full process msgs set and check block limits after
-            if collation_data.tx_count >= self.config.block_txs_limit as u64 {
-                tracing::debug!(target: tracing_targets::COLLATOR,
-                    "STUB: block limit reached: {}/{}",
-                    collation_data.tx_count, self.config.block_txs_limit,
-                );
-                block_limits_reached = true;
-            }
-
             // commit messages to iterator only if set was fully processed
             if msgs_set_full_processed {
                 self.mq_adapter.commit_messages_to_iterator(
@@ -469,7 +498,7 @@ impl CollatorStdImpl {
 
         // execute tock transaction
         let execute_tock_elapsed;
-        if self.shard_id.is_masterchain() {
+        if is_masterchain {
            let histogram =
                HistogramGuard::begin_with_labels("tycho_do_collate_execute_tock_time", labels);
            self.create_ticktock_transactions(
@@ -918,20 +947,18 @@ impl CollatorStdImpl {
     fn calc_start_lt(
         mc_data: &McData,
         prev_shard_data: &PrevData,
-        collation_data: &BlockCollationData,
+        is_masterchain: bool,
+        shards_max_end_lt: u64,
     ) -> Result<u64> {
         tracing::trace!(target: tracing_targets::COLLATOR, "calc_start_lt()");
 
-        let mut start_lt = if !collation_data.block_id_short.shard.is_masterchain() {
+        let mut start_lt = if !is_masterchain {
             std::cmp::max(
                 mc_data.mc_state_stuff().state().gen_lt,
                 prev_shard_data.gen_lt(),
             )
         } else {
-            std::cmp::max(
-                mc_data.mc_state_stuff().state().gen_lt,
-                collation_data.shards_max_end_lt(),
-            )
+            std::cmp::max(mc_data.mc_state_stuff().state().gen_lt, shards_max_end_lt)
         };
 
         let align = mc_data.get_lt_align();
@@ -1227,14 +1254,14 @@ impl CollatorStdImpl {
     fn import_new_shard_top_blocks_for_masterchain(
         &self,
         config: &BlockchainConfig,
-        collation_data: &mut BlockCollationData,
+        collation_data_builder: &mut BlockCollationDataBuilder,
         top_shard_blocks_info: Vec<TopBlockDescription>,
     ) -> Result<()> {
         tracing::trace!(target: tracing_targets::COLLATOR,
             "import_new_shard_top_blocks_for_masterchain()",
         );
 
-        let gen_utime = collation_data.gen_utime;
+        let gen_utime = collation_data_builder.gen_utime;
         for TopBlockDescription {
             block_id,
             block_info,
@@ -1248,13 +1275,13 @@ impl CollatorStdImpl {
                 &block_info,
                 &value_flow,
             ));
-            shard_descr.reg_mc_seqno = collation_data.block_id_short.seqno;
+            shard_descr.reg_mc_seqno = collation_data_builder.block_id_short.seqno;
 
-            collation_data.update_shards_max_end_lt(shard_descr.end_lt);
+            collation_data_builder.update_shards_max_end_lt(shard_descr.end_lt);
 
             let shard_id = block_id.shard;
 
-            collation_data.top_shard_blocks_ids.push(block_id);
+            collation_data_builder.top_shard_blocks_ids.push(block_id);
 
             if shard_descr.gen_utime > gen_utime {
                 tracing::debug!(target: tracing_targets::COLLATOR,
@@ -1275,19 +1302,23 @@ impl CollatorStdImpl {
             // TODO: Check may update shard block info
             // TODO: Implement merge algorithm in future
 
-            self.update_shard_block_info(collation_data.shards_mut()?, shard_id, shard_descr)?;
+            self.update_shard_block_info(
+                collation_data_builder.shards_mut()?,
+                shard_id,
+                shard_descr,
+            )?;
 
-            collation_data.store_shard_fees(shard_id, proof_funds)?;
-            collation_data.register_shard_block_creators(creators)?;
+            collation_data_builder.store_shard_fees(shard_id, proof_funds)?;
+            collation_data_builder.register_shard_block_creators(creators)?;
         }
 
-        let shard_fees = collation_data.shard_fees.root_extra().clone();
+        let shard_fees = collation_data_builder.shard_fees.root_extra().clone();
 
-        collation_data
+        collation_data_builder
            .value_flow
            .fees_collected
            .checked_add(&shard_fees.fees)?;
-        collation_data.value_flow.fees_imported = shard_fees.fees;
+        collation_data_builder.value_flow.fees_imported = shard_fees.fees;
 
         Ok(())
     }
@@ -1338,6 +1369,7 @@ fn new_transaction(
     );
 
     collation_data.execute_count_all += 1;
+    collation_data.block_limit.gas_used += executor_output.gas_used as u32;
 
     let import_fees;
     let in_msg_hash = *in_msg.cell.repr_hash();
@@ -1481,7 +1513,6 @@ fn new_transaction(
         info = ?out_msg_info,
         "adding out message to out_msgs",
     );
-
     match &out_msg_info {
         MsgInfo::Int(IntMsgInfo { fwd_fee, dst, .. }) => {
             collation_data.int_enqueue_count += 1;
diff --git a/collator/src/collator/mod.rs b/collator/src/collator/mod.rs
index 04c41654b..7e0e187d9 100644
--- a/collator/src/collator/mod.rs
+++ b/collator/src/collator/mod.rs
@@ -278,6 +278,12 @@ impl CollatorStdImpl {
             .expect("should `init` collator before calling `working_state`")
     }
 
+    fn working_state_mut(&mut self) -> &mut WorkingState {
+        self.working_state
+            .as_mut()
+            .expect("should `init` collator before calling `working_state`")
+    }
+
     fn set_working_state(&mut self, working_state: WorkingState) {
         self.working_state = Some(working_state);
     }
@@ -730,11 +736,10 @@ impl CollatorStdImpl {
         let force_mc_block_by_uncommitted_chain =
             uncommitted_chain_length >= self.config.max_uncommitted_chain_length;
 
-        // should import anchor every fixed interval in uncommitted blocks chain
-        let force_import_anchor_by_uncommitted_chain = uncommitted_chain_length
-            / self.config.uncommitted_chain_to_import_next_anchor
-            > 0
-            && uncommitted_chain_length % self.config.uncommitted_chain_to_import_next_anchor == 0;
+        // should import anchor after a fixed amount of gas has been used by shard blocks in the uncommitted chain
+        let gas_used = self.working_state().prev_shard_data.gas_used();
+        let force_import_anchor_by_uncommitted_chain =
+            uncommitted_chain_length > 0 && gas_used > self.config.gas_used_to_import_next_anchor;
 
         // check if has pending internals or externals
         let no_pending_msgs = !has_internals && !has_externals;
@@ -755,8 +760,8 @@ impl CollatorStdImpl {
             );
         } else if force_import_anchor_by_uncommitted_chain {
             tracing::info!(target: tracing_targets::COLLATOR,
-                "uncommitted chain interval to import anchor {} reached on length {}, will import next anchor",
-                self.config.uncommitted_chain_to_import_next_anchor, uncommitted_chain_length,
+                "uncommitted chain gas used {} reached import anchor limit {} on chain length {}, will import next anchor",
+                gas_used, self.config.gas_used_to_import_next_anchor, uncommitted_chain_length,
             );
         }
         let (next_anchor, next_anchor_has_externals) = self.import_next_anchor().await?;
@@ -766,6 +771,7 @@ impl CollatorStdImpl {
                 "just imported anchor has externals, will collate next block",
             );
         }
+        self.working_state_mut().prev_shard_data.clear_gas_used();
         Some((next_anchor, next_anchor_has_externals))
     } else {
         None
diff --git a/collator/src/collator/types.rs b/collator/src/collator/types.rs
index 8777931ed..3bb7a2657 100644
--- a/collator/src/collator/types.rs
+++ b/collator/src/collator/types.rs
@@ -1,15 +1,15 @@
-use std::collections::BTreeMap;
+use std::collections::{BTreeMap, HashSet};
 use std::sync::{Arc, OnceLock};
 
 use anyhow::{anyhow, bail, Result};
 use everscale_types::cell::{Cell, HashBytes, UsageTree, UsageTreeMode};
 use everscale_types::dict::Dict;
 use everscale_types::models::{
-    Account, AccountState, BlockId, BlockIdShort, BlockInfo, BlockRef, BlockchainConfig,
-    CurrencyCollection, HashUpdate, ImportFees, InMsg, Lazy, LibDescr, McStateExtra, MsgInfo,
-    OptionalAccount, OutMsg, PrevBlockRef, ProcessedUptoInfo, ShardAccount, ShardAccounts,
-    ShardDescription, ShardFeeCreated, ShardFees, ShardIdent, ShardIdentFull, SimpleLib,
-    SpecialFlags, StateInit, Transaction, ValueFlow,
+    Account, AccountState, BlockId, BlockIdShort, BlockInfo, BlockLimits, BlockParamLimits,
+    BlockRef, BlockchainConfig, CurrencyCollection, HashUpdate, ImportFees, InMsg, Lazy, LibDescr,
+    McStateExtra, MsgInfo, OptionalAccount, OutMsg, PrevBlockRef, ProcessedUptoInfo, ShardAccount,
+    ShardAccounts, ShardDescription, ShardFeeCreated, ShardFees, ShardIdent, ShardIdentFull,
+    SimpleLib, SpecialFlags, StateInit, Transaction, ValueFlow,
 };
 use tycho_block_util::dict::RelaxedAugDict;
 use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff};
@@ -192,8 +192,8 @@ pub(super) struct PrevData {
     gen_chain_time: u32,
     gen_lt: u64,
     total_validator_fees: CurrencyCollection,
+    gas_used: u64,
     // TODO: remove if we do not need this
-    _overload_history: u64,
     _underload_history: u64,
 
     processed_upto: ProcessedUptoInfo,
@@ -225,7 +225,7 @@ impl PrevData {
         let gen_lt = observable_states[0].state().gen_lt;
         let observable_accounts = observable_states[0].state().load_accounts()?;
         let total_validator_fees = observable_states[0].state().total_validator_fees.clone();
-        let overload_history = observable_states[0].state().overload_history;
+        let gas_used = observable_states[0].state().overload_history;
         let underload_history = observable_states[0].state().underload_history;
         let processed_upto = pure_prev_states[0].state().processed_upto.load()?;
 
@@ -241,7 +241,7 @@ impl PrevData {
             gen_chain_time: gen_utime,
             gen_lt,
             total_validator_fees,
-            _overload_history: overload_history,
+            gas_used,
             _underload_history: underload_history,
 
             processed_upto,
@@ -304,6 +304,14 @@ impl PrevData {
         self.gen_lt
     }
 
+    pub fn gas_used(&self) -> u64 {
+        self.gas_used
+    }
+
+    pub fn clear_gas_used(&mut self) {
+        self.gas_used = 0;
+    }
+
     pub fn total_validator_fees(&self) -> &CurrencyCollection {
         &self.total_validator_fees
    }
@@ -313,7 +321,136 @@ impl PrevData {
     }
 }
 
-#[derive(Debug, Default)]
+#[derive(Debug)]
+pub(super) struct BlockCollationDataBuilder {
+    pub block_id_short: BlockIdShort,
+    pub gen_utime: u32,
+    pub gen_utime_ms: u16,
+    pub processed_upto: ProcessedUptoInfo,
+    shards: Option<FastHashMap<ShardIdent, Box<ShardDescription>>>,
+    pub shards_max_end_lt: u64,
+    pub shard_fees: ShardFees,
+    pub value_flow: ValueFlow,
+    pub min_ref_mc_seqno: u32,
+    pub rand_seed: HashBytes,
+    pub block_create_count: FastHashMap<HashBytes, u64>,
+    pub created_by: HashBytes,
+    pub top_shard_blocks_ids: Vec<BlockId>,
+}
+
+impl BlockCollationDataBuilder {
+    pub fn new(
+        block_id_short: BlockIdShort,
+        rand_seed: HashBytes,
+        min_ref_mc_seqno: u32,
+        next_chain_time: u64,
+        processed_upto: ProcessedUptoInfo,
+        created_by: HashBytes,
+    ) -> Self {
+        let gen_utime = (next_chain_time / 1000) as u32;
+        let gen_utime_ms = (next_chain_time % 1000) as u16;
+        Self {
+            block_id_short,
+            gen_utime,
+            gen_utime_ms,
+            processed_upto,
+            shards_max_end_lt: 0,
+            shard_fees: Default::default(),
+            value_flow: Default::default(),
+            min_ref_mc_seqno,
+            rand_seed,
+            block_create_count: Default::default(),
+            created_by,
+            shards: None,
+            top_shard_blocks_ids: vec![],
+        }
+    }
+    pub fn set_shards(&mut self, shards: FastHashMap<ShardIdent, Box<ShardDescription>>) {
+        self.shards = Some(shards);
+    }
+
+    pub fn shards_mut(&mut self) -> Result<&mut FastHashMap<ShardIdent, Box<ShardDescription>>> {
+        self.shards
+            .as_mut()
+            .ok_or_else(|| anyhow!("`shards` is not initialized yet"))
+    }
+
+    pub fn update_shards_max_end_lt(&mut self, val: u64) {
+        if val > self.shards_max_end_lt {
+            self.shards_max_end_lt = val;
+        }
+    }
+
+    pub fn store_shard_fees(
+        &mut self,
+        shard_id: ShardIdent,
+        proof_funds: ProofFunds,
+    ) -> Result<()> {
+        let shard_fee_created = ShardFeeCreated {
+            fees: proof_funds.fees_collected.clone(),
+            create: proof_funds.funds_created.clone(),
+        };
+        self.shard_fees.set(
+            ShardIdentFull::from(shard_id),
+            shard_fee_created.clone(),
+            shard_fee_created,
+        )?;
+        Ok(())
+    }
+
+    pub fn register_shard_block_creators(&mut self, creators: Vec<HashBytes>) -> Result<()> {
+        for creator in creators {
+            self.block_create_count
+                .entry(creator)
+                .and_modify(|count| *count += 1)
+                .or_insert(1);
+        }
+        Ok(())
+    }
+
+    pub fn build(self, start_lt: u64, block_limits: BlockLimits) -> BlockCollationData {
+        let block_limit = BlockLimitStats::new(block_limits, start_lt);
+        BlockCollationData {
+            block_id_short: self.block_id_short,
+            gen_utime: self.gen_utime,
+            gen_utime_ms: self.gen_utime_ms,
+            processed_upto: self.processed_upto,
+            min_ref_mc_seqno: self.min_ref_mc_seqno,
+            rand_seed: self.rand_seed,
+            created_by: self.created_by,
+            shards: self.shards,
+            top_shard_blocks_ids: self.top_shard_blocks_ids,
+            shard_fees: self.shard_fees,
+            block_create_count: self.block_create_count,
+            value_flow: self.value_flow,
+            block_limit,
+            start_lt,
+            next_lt: start_lt + 1,
+            tx_count: 0,
+            total_execute_msgs_time_mc: 0,
+            execute_count_all: 0,
+            execute_count_ext: 0,
+            ext_msgs_error_count: 0,
+            execute_count_int: 0,
+            execute_count_new_int: 0,
+            int_enqueue_count: 0,
+            int_dequeue_count: 0,
+            read_ext_msgs: 0,
+            read_int_msgs_from_iterator: 0,
+            new_msgs_created: 0,
+            inserted_new_msgs_to_iterator: 0,
+            read_new_msgs_from_iterator: 0,
+            in_msgs: Default::default(),
+            out_msgs: Default::default(),
+            externals_reading_started: false,
+            _internals_reading_started: false,
+            mint_msg: None,
+            recover_create_msg: None,
+        }
+    }
+}
+
+#[derive(Debug)]
 pub(super) struct BlockCollationData {
     // block_descr: Arc<String>,
     pub block_id_short: BlockIdShort,
@@ -322,6 +459,8 @@ pub(super) struct BlockCollationData {
 
     pub tx_count: u64,
 
+    pub block_limit: BlockLimitStats,
+
     pub total_execute_msgs_time_mc: u128,
 
     pub execute_count_all: u64,
@@ -357,7 +496,6 @@ pub(super) struct BlockCollationData {
     pub top_shard_blocks_ids: Vec<BlockId>,
 
     shards: Option<FastHashMap<ShardIdent, Box<ShardDescription>>>,
-    shards_max_end_lt: u64,
 
     // TODO: setup update logic when ShardFees would be implemented
     pub shard_fees: ShardFees,
@@ -367,7 +505,7 @@ pub(super) struct BlockCollationData {
 
     pub value_flow: ValueFlow,
 
-    min_ref_mc_seqno: Option<u32>,
+    pub min_ref_mc_seqno: u32,
 
     pub rand_seed: HashBytes,
 
@@ -376,6 +514,85 @@ pub(super) struct BlockCollationData {
     // TODO: set from anchor
     pub created_by: HashBytes,
 }
+
+#[derive(Debug)]
+pub struct BlockLimitStats {
+    pub gas_used: u32,
+    pub lt_current: u64,
+    pub lt_start: u64,
+    pub cells_seen: HashSet<HashBytes>,
+    pub cells_bits: u32,
+    pub block_limits: BlockLimits,
+}
+
+impl BlockLimitStats {
+    pub fn new(block_limits: BlockLimits, lt_start: u64) -> Self {
+        Self {
+            gas_used: 0,
+            lt_current: lt_start,
+            lt_start,
+            cells_seen: Default::default(),
+            cells_bits: 0,
+            block_limits,
+        }
+    }
+
+    pub fn reached(&self, level: BlockLimitsLevel) -> bool {
+        let BlockLimits {
+            bytes,
+            gas,
+            lt_delta,
+        } = &self.block_limits;
+
+        let BlockParamLimits {
+            soft_limit,
+            hard_limit,
+            ..
+        } = bytes;
+
+        let cells_bytes = self.cells_bits / 8;
+        if cells_bytes >= *hard_limit {
+            return true;
+        }
+        if cells_bytes >= *soft_limit && level == BlockLimitsLevel::Soft {
+            return true;
+        }
+
+        let BlockParamLimits {
+            soft_limit,
+            hard_limit,
+            ..
+        } = gas;
+
+        if self.gas_used >= *hard_limit {
+            return true;
+        }
+        if self.gas_used >= *soft_limit && level == BlockLimitsLevel::Soft {
+            return true;
+        }
+
+        let BlockParamLimits {
+            soft_limit,
+            hard_limit,
+            ..
+        } = lt_delta;
+
+        let delta_lt = (self.lt_current - self.lt_start) as u32;
+        if delta_lt >= *hard_limit {
+            return true;
+        }
+        if delta_lt >= *soft_limit && level == BlockLimitsLevel::Soft {
+            return true;
+        }
+        false
+    }
+}
+
+#[derive(Debug, Clone, Copy, Eq, Ord, PartialEq, PartialOrd)]
+pub enum BlockLimitsLevel {
+    Underload,
+    Soft,
+    Hard,
+}
 
 #[derive(Debug)]
 pub struct PreparedInMsg {
@@ -396,61 +613,16 @@ impl BlockCollationData {
             .as_ref()
             .ok_or_else(|| anyhow!("`shards` is not initialized yet"))
     }
-    pub fn set_shards(&mut self, shards: FastHashMap<ShardIdent, Box<ShardDescription>>) {
-        self.shards = Some(shards);
-    }
+
     pub fn shards_mut(&mut self) -> Result<&mut FastHashMap<ShardIdent, Box<ShardDescription>>> {
         self.shards
             .as_mut()
            .ok_or_else(|| anyhow!("`shards` is not initialized yet"))
     }
-    pub fn shards_max_end_lt(&self) -> u64 {
-        self.shards_max_end_lt
-    }
-    pub fn update_shards_max_end_lt(&mut self, val: u64) {
-        if val > self.shards_max_end_lt {
-            self.shards_max_end_lt = val;
-        }
-    }
-
     pub fn update_ref_min_mc_seqno(&mut self, mc_seqno: u32) -> u32 {
-        let min_ref_mc_seqno =
-            std::cmp::min(self.min_ref_mc_seqno.unwrap_or(std::u32::MAX), mc_seqno);
-        self.min_ref_mc_seqno = Some(min_ref_mc_seqno);
-        min_ref_mc_seqno
-    }
-
-    pub fn min_ref_mc_seqno(&self) -> Result<u32> {
+        self.min_ref_mc_seqno = std::cmp::min(self.min_ref_mc_seqno, mc_seqno);
         self.min_ref_mc_seqno
-            .ok_or_else(|| anyhow!("`min_ref_mc_seqno` is not initialized yet"))
     }
-
-    pub fn store_shard_fees(
-        &mut self,
-        shard_id: ShardIdent,
-        proof_funds: ProofFunds,
-    ) -> Result<()> {
-        let shard_fee_created = ShardFeeCreated {
-            fees: proof_funds.fees_collected.clone(),
-            create: proof_funds.funds_created.clone(),
-        };
-        self.shard_fees.set(
-            ShardIdentFull::from(shard_id),
-            shard_fee_created.clone(),
-            shard_fee_created,
-        )?;
-        Ok(())
-    }
-
-    pub fn register_shard_block_creators(&mut self, creators: Vec<HashBytes>) -> Result<()> {
-        for creator in creators {
-            self.block_create_count
-                .entry(creator)
-                .and_modify(|count| *count += 1)
-                .or_insert(1);
-        }
-        Ok(())
-    }
 }
diff --git a/collator/src/manager/utils.rs b/collator/src/manager/utils.rs
index 7cc33b97c..509675ec9 100644
--- a/collator/src/manager/utils.rs
+++ b/collator/src/manager/utils.rs
@@ -1,6 +1,5 @@
 use anyhow::Result;
 use everscale_crypto::ed25519::{KeyPair, PublicKey};
-use everscale_types::boc::BocRepr;
 use everscale_types::models::ValidatorDescription;
 
 use tycho_block_util::block::{BlockStuff, BlockStuffAug};
diff --git a/collator/src/types.rs b/collator/src/types.rs
index 04be808de..54dff6d31 100644
--- a/collator/src/types.rs
+++ b/collator/src/types.rs
@@ -23,9 +23,7 @@ pub struct CollationConfig {
     pub mc_block_min_interval: Duration,
     pub max_mc_block_delta_from_bc_to_await_own: i32,
     pub max_uncommitted_chain_length: u32,
-    pub uncommitted_chain_to_import_next_anchor: u32,
-
-    pub block_txs_limit: u32,
+    pub gas_used_to_import_next_anchor: u64,
 
     pub msgs_exec_params: MsgsExecutionParams,
 }
@@ -40,9 +38,7 @@ impl Default for CollationConfig {
             max_mc_block_delta_from_bc_to_await_own: 2,
 
             max_uncommitted_chain_length: 31,
-            uncommitted_chain_to_import_next_anchor: 4,
-
-            block_txs_limit: 10000,
+            gas_used_to_import_next_anchor: 80_000_000u64,
 
             msgs_exec_params: MsgsExecutionParams::default(),
         }
diff --git a/collator/src/utils/async_queued_dispatcher.rs b/collator/src/utils/async_queued_dispatcher.rs
index 12f664e68..00beb006d 100644
--- a/collator/src/utils/async_queued_dispatcher.rs
+++ b/collator/src/utils/async_queued_dispatcher.rs
@@ -8,7 +8,7 @@ use tokio::sync::{mpsc, oneshot};
 use super::task_descr::{TaskDesc, TaskResponder};
 use crate::tracing_targets;
 
-pub const STANDARD_DISPATCHER_QUEUE_BUFFER_SIZE: usize = 10;
+pub const STANDARD_DISPATCHER_QUEUE_BUFFER_SIZE: usize = 100;
 
 type AsyncTaskDesc<W, R> = TaskDesc<
     dyn FnOnce(W) -> Pin<Box<dyn Future<Output = (W, Result<R>)> + Send>> + Send,
diff --git a/scripts/gen-dashboard.py b/scripts/gen-dashboard.py
index c6b900394..c5b017e6c 100644
--- a/scripts/gen-dashboard.py
+++ b/scripts/gen-dashboard.py
@@ -630,7 +630,7 @@ def collator_do_collate() -> RowPanel:
             "Number of internals in the iterator",
             labels=['workchain=~"$workchain"'],
         ),
-        create_counter_panel(
+        create_gauge_panel(
            "tycho_do_collate_int_msgs_queue_calc",
            "Calculated Internal queue len",
        ),