From a2bdebb33c85d4805e3934bcf758c1d64ebc85fe Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 24 Jul 2023 12:01:34 +0800 Subject: [PATCH 01/30] add storage of dag accumulator --- commons/accumulator/src/node.rs | 1 + storage/src/accumulator/mod.rs | 19 +++++- storage/src/lib.rs | 103 ++++++++++++++++++++++++-------- storage/src/upgrade.rs | 8 +++ 4 files changed, 106 insertions(+), 25 deletions(-) diff --git a/commons/accumulator/src/node.rs b/commons/accumulator/src/node.rs index f3d05cd29c..36d8138eea 100644 --- a/commons/accumulator/src/node.rs +++ b/commons/accumulator/src/node.rs @@ -16,6 +16,7 @@ use starcoin_crypto::{ pub enum AccumulatorStoreType { Transaction, Block, + SyncDag, } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] diff --git a/storage/src/accumulator/mod.rs b/storage/src/accumulator/mod.rs index fbbb6bc37e..594e5681d1 100644 --- a/storage/src/accumulator/mod.rs +++ b/storage/src/accumulator/mod.rs @@ -1,9 +1,9 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::define_storage; use crate::storage::{CodecKVStore, ValueCodec}; use crate::StorageInstance; +use crate::{define_storage, SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME}; use crate::{BLOCK_ACCUMULATOR_NODE_PREFIX_NAME, TRANSACTION_ACCUMULATOR_NODE_PREFIX_NAME}; use anyhow::Result; use bcs_ext::BCSCodec; @@ -24,6 +24,13 @@ define_storage!( TRANSACTION_ACCUMULATOR_NODE_PREFIX_NAME ); +define_storage!( + DagBlockAccumulatorStorage, + HashValue, + AccumulatorNode, + SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME +); + impl ValueCodec for AccumulatorNode { fn encode_value(&self) -> Result> { self.encode() @@ -62,6 +69,16 @@ impl AccumulatorStorage { } } +impl AccumulatorStorage { + pub fn new_dag_block_accumulator_storage( + instance: StorageInstance, + ) -> AccumulatorStorage { + Self { + store: DagBlockAccumulatorStorage::new(instance), + } + } +} + impl AccumulatorTreeStore for AccumulatorStorage where S: CodecKVStore, diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 1f732914fb..ba3be0d55c 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -1,40 +1,40 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::{ - accumulator::{AccumulatorStorage, BlockAccumulatorStorage, TransactionAccumulatorStorage}, - block::BlockStorage, - block_info::{BlockInfoStorage, BlockInfoStore}, - chain_info::ChainInfoStorage, - contract_event::ContractEventStorage, - state_node::StateStorage, - storage::{CodecKVStore, CodecWriteBatch, ColumnFamilyName, StorageInstance}, +use crate::accumulator::{ + AccumulatorStorage, BlockAccumulatorStorage, TransactionAccumulatorStorage, }; +use crate::block::BlockStorage; +use crate::block_info::{BlockInfoStorage, BlockInfoStore}; +use crate::chain_info::ChainInfoStorage; +use crate::contract_event::ContractEventStorage; +use crate::state_node::StateStorage; +use crate::storage::{CodecKVStore, CodecWriteBatch, ColumnFamilyName, StorageInstance}; //use crate::table_info::{TableInfoStorage, TableInfoStore}; -use crate::{ - transaction::TransactionStorage, - transaction_info::{TransactionInfoHashStorage, TransactionInfoStorage}, -}; +use crate::transaction::TransactionStorage; +use crate::transaction_info::{TransactionInfoHashStorage, TransactionInfoStorage}; use anyhow::{bail, format_err, Error, Result}; +use flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage, SyncFlexiDagStorage}; use network_p2p_types::peer_id::PeerId; use 
num_enum::{IntoPrimitive, TryFromPrimitive}; use once_cell::sync::Lazy; -use starcoin_accumulator::{node::AccumulatorStoreType, AccumulatorTreeStore}; +use starcoin_accumulator::node::AccumulatorStoreType; +use starcoin_accumulator::AccumulatorTreeStore; use starcoin_crypto::HashValue; use starcoin_state_store_api::{StateNode, StateNodeStore}; +use starcoin_types::contract_event::ContractEvent; +use starcoin_types::startup_info::{ChainInfo, ChainStatus, SnapshotRange}; +use starcoin_types::transaction::{RichTransactionInfo, Transaction}; use starcoin_types::{ block::{Block, BlockBody, BlockHeader, BlockInfo}, - contract_event::ContractEvent, - startup_info::{ChainInfo, ChainStatus, SnapshotRange, StartupInfo}, - transaction::{RichTransactionInfo, Transaction}, + startup_info::StartupInfo, }; //use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; -use std::{ - collections::BTreeMap, - fmt::{Debug, Display, Formatter}, - sync::Arc, -}; -pub use upgrade::{BARNARD_HARD_FORK_HASH, BARNARD_HARD_FORK_HEIGHT}; +use std::collections::BTreeMap; +use std::fmt::{Debug, Display, Formatter}; +use std::sync::Arc; +pub use upgrade::BARNARD_HARD_FORK_HASH; +pub use upgrade::BARNARD_HARD_FORK_HEIGHT; pub mod accumulator; pub mod batch; @@ -45,6 +45,7 @@ pub mod chain_info; pub mod contract_event; pub mod db_storage; pub mod errors; +pub mod flexi_dag; pub mod metrics; pub mod state_node; pub mod storage; @@ -77,6 +78,8 @@ pub const TRANSACTION_INFO_HASH_PREFIX_NAME: ColumnFamilyName = "transaction_inf pub const CONTRACT_EVENT_PREFIX_NAME: ColumnFamilyName = "contract_event"; pub const FAILED_BLOCK_PREFIX_NAME: ColumnFamilyName = "failed_block"; pub const TABLE_INFO_PREFIX_NAME: ColumnFamilyName = "table_info"; +pub const SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME: ColumnFamilyName = "sync_flexi_dag_accumulator"; +pub const SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME: ColumnFamilyName = "sync_flexi_dag_snapshot"; ///db storage use prefix_name vec to init /// Please note that adding a prefix needs to be added in vec simultaneously, remember!! 
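// Illustrative sketch, not part of this patch: the two new prefixes above are RocksDB
// column-family names, so (per the "remember!!" note) they must also be registered in
// the column-family list of the new storage version, which the next hunk does via
// VEC_PREFIX_NAME_V4 and StorageVersion::V4. A quick way to check that wiring, assuming
// only the APIs already shown in this file:
//
// let cfs = StorageVersion::current_version().get_column_family_names();
// assert!(cfs.contains(&SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME));
// assert!(cfs.contains(&SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME));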
@@ -142,17 +145,43 @@ static VEC_PREFIX_NAME_V3: Lazy> = Lazy::new(|| { // TABLE_INFO_PREFIX_NAME, ] }); + +static VEC_PREFIX_NAME_V4: Lazy> = Lazy::new(|| { + vec![ + BLOCK_ACCUMULATOR_NODE_PREFIX_NAME, + TRANSACTION_ACCUMULATOR_NODE_PREFIX_NAME, + BLOCK_PREFIX_NAME, + BLOCK_HEADER_PREFIX_NAME, + BLOCK_BODY_PREFIX_NAME, // unused column + BLOCK_INFO_PREFIX_NAME, + BLOCK_TRANSACTIONS_PREFIX_NAME, + BLOCK_TRANSACTION_INFOS_PREFIX_NAME, + STATE_NODE_PREFIX_NAME, + CHAIN_INFO_PREFIX_NAME, + TRANSACTION_PREFIX_NAME, + TRANSACTION_INFO_PREFIX_NAME, // unused column + TRANSACTION_INFO_PREFIX_NAME_V2, + TRANSACTION_INFO_HASH_PREFIX_NAME, + CONTRACT_EVENT_PREFIX_NAME, + FAILED_BLOCK_PREFIX_NAME, + SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME, + SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME, + // TABLE_INFO_PREFIX_NAME, + ] +}); + #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, IntoPrimitive, TryFromPrimitive)] #[repr(u8)] pub enum StorageVersion { V1 = 1, V2 = 2, V3 = 3, + V4 = 4, } impl StorageVersion { pub fn current_version() -> StorageVersion { - StorageVersion::V3 + StorageVersion::V4 } pub fn get_column_family_names(&self) -> &'static [ColumnFamilyName] { @@ -160,6 +189,7 @@ impl StorageVersion { StorageVersion::V1 => &VEC_PREFIX_NAME_V1, StorageVersion::V2 => &VEC_PREFIX_NAME_V2, StorageVersion::V3 => &VEC_PREFIX_NAME_V3, + StorageVersion::V4 => &VEC_PREFIX_NAME_V4, } } } @@ -262,6 +292,12 @@ pub trait TransactionStore { fn get_transactions(&self, txn_hash_vec: Vec) -> Result>>; } +pub trait SyncFlexiDagStore { + fn put_hashes(&self, key: HashValue, accumulator_snapshot: SyncFlexiDagSnapshot) -> Result<()>; + fn query_by_hash(&self, key: HashValue) -> Result>; + fn get_accumulator_snapshot_storage(&self) -> std::sync::Arc; +} + // TODO: remove Arc, we can clone Storage directly. 
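// Illustrative sketch, not part of this patch: expected usage of the SyncFlexiDagStore
// trait above, following the semantics exercised by the tests added in patch 02. A
// snapshot is keyed by the hash of the sorted child-hash set and read back by the same
// key. `record_and_load`, `store`, `children` and the imports are introduced only for
// this example; the Option in the return type assumes query_by_hash returns
// Result<Option<SyncFlexiDagSnapshot>>, as the tests do.
//
// use anyhow::Result;
// use starcoin_accumulator::accumulator_info::AccumulatorInfo;
// use starcoin_crypto::HashValue;
//
// fn record_and_load(
//     store: &dyn SyncFlexiDagStore,
//     accumulator_info: AccumulatorInfo,
//     mut children: Vec<HashValue>,
// ) -> Result<Option<SyncFlexiDagSnapshot>> {
//     children.sort();
//     // Derive the accumulator leaf key from the concatenated, sorted child hashes.
//     let mut bytes = Vec::new();
//     for child in children.clone() {
//         bytes.extend(child.into_iter());
//     }
//     let key = HashValue::sha3_256_of(&bytes);
//     // Persist the snapshot under that key ...
//     store.put_hashes(
//         key,
//         SyncFlexiDagSnapshot {
//             child_hashes: children,
//             accumulator_info,
//         },
//     )?;
//     // ... and read it back by the same key (None if nothing was stored).
//     store.query_by_hash(key)
// }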
#[derive(Clone)] pub struct Storage { @@ -275,6 +311,7 @@ pub struct Storage { block_info_storage: BlockInfoStorage, event_storage: ContractEventStorage, chain_info_storage: ChainInfoStorage, + flexi_dag_storage: SyncFlexiDagStorage, // table_info_storage: TableInfoStorage, // instance: StorageInstance, } @@ -294,7 +331,8 @@ impl Storage { AccumulatorStorage::new_transaction_accumulator_storage(instance.clone()), block_info_storage: BlockInfoStorage::new(instance.clone()), event_storage: ContractEventStorage::new(instance.clone()), - chain_info_storage: ChainInfoStorage::new(instance), + chain_info_storage: ChainInfoStorage::new(instance.clone()), + flexi_dag_storage: SyncFlexiDagStorage::new(instance), // table_info_storage: TableInfoStorage::new(instance), // instance, }; @@ -565,6 +603,20 @@ impl TransactionStore for Storage { } } +impl SyncFlexiDagStore for Storage { + fn put_hashes(&self, key: HashValue, accumulator_snapshot: SyncFlexiDagSnapshot) -> Result<()> { + self.flexi_dag_storage.put_hashes(key, accumulator_snapshot) + } + + fn query_by_hash(&self, key: HashValue) -> Result> { + self.flexi_dag_storage.get_hashes_by_hash(key) + } + + fn get_accumulator_snapshot_storage(&self) -> std::sync::Arc { + self.flexi_dag_storage.get_snapshot_storage() + } +} + /// Chain storage define pub trait Store: StateNodeStore @@ -646,6 +698,9 @@ impl Store for Storage { AccumulatorStoreType::Transaction => { Arc::new(self.transaction_accumulator_storage.clone()) } + AccumulatorStoreType::SyncDag => { + Arc::new(self.flexi_dag_storage.get_accumulator_storage()) + } } } } diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index 87b259026c..ecf2b323b1 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -163,6 +163,11 @@ impl DBUpgrade { Ok(()) } + fn db_upgrade_v3_v4(_instance: &mut StorageInstance) -> Result<()> { + // https://github.com/facebook/rocksdb/issues/1295 + Ok(()) + } + pub fn do_upgrade( version_in_db: StorageVersion, version_in_code: StorageVersion, @@ -185,6 +190,9 @@ impl DBUpgrade { (StorageVersion::V2, StorageVersion::V3) => { Self::db_upgrade_v2_v3(instance)?; } + (StorageVersion::V3, StorageVersion::V4) => { + Self::db_upgrade_v3_v4(instance)?; + } _ => bail!( "Can not upgrade db from {:?} to {:?}", version_in_db, From 9cb07f31862c97b3cd1783b95bac2bff71b6d014 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 24 Jul 2023 15:11:21 +0800 Subject: [PATCH 02/30] add flexi dag --- storage/src/flexi_dag/mod.rs | 76 ++++++++ storage/src/tests/test_dag.rs | 347 ++++++++++++++++++++++++++++++++++ 2 files changed, 423 insertions(+) create mode 100644 storage/src/flexi_dag/mod.rs create mode 100644 storage/src/tests/test_dag.rs diff --git a/storage/src/flexi_dag/mod.rs b/storage/src/flexi_dag/mod.rs new file mode 100644 index 0000000000..6cd09959a8 --- /dev/null +++ b/storage/src/flexi_dag/mod.rs @@ -0,0 +1,76 @@ +use std::sync::Arc; + +use crate::{ + accumulator::{AccumulatorStorage, DagBlockAccumulatorStorage}, + define_storage, + storage::{CodecKVStore, StorageInstance, ValueCodec}, + SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME, +}; +use anyhow::Result; +use bcs_ext::BCSCodec; +use serde::{Deserialize, Serialize}; +use starcoin_accumulator::accumulator_info::AccumulatorInfo; +use starcoin_crypto::HashValue; + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] +pub struct SyncFlexiDagSnapshot { + pub child_hashes: Vec, // child nodes, to get the relationship, use dag's relationship store + pub accumulator_info: AccumulatorInfo, +} + +impl ValueCodec 
for SyncFlexiDagSnapshot { + fn encode_value(&self) -> Result> { + self.encode() + } + + fn decode_value(data: &[u8]) -> Result { + Self::decode(data) + } +} + +define_storage!( + SyncFlexiDagSnapshotStorage, + HashValue, // accumulator leaf node + SyncFlexiDagSnapshot, + SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME +); + +#[derive(Clone)] +pub struct SyncFlexiDagStorage { + snapshot_storage: Arc, + accumulator_storage: AccumulatorStorage, +} + +impl SyncFlexiDagStorage { + pub fn new(instance: StorageInstance) -> Self { + let snapshot_storage = Arc::new(SyncFlexiDagSnapshotStorage::new(instance.clone())); + let accumulator_storage = + AccumulatorStorage::::new_dag_block_accumulator_storage( + instance, + ); + + SyncFlexiDagStorage { + snapshot_storage, + accumulator_storage, + } + } + + pub fn get_accumulator_storage(&self) -> AccumulatorStorage { + self.accumulator_storage.clone() + } + + pub fn get_snapshot_storage(&self) -> Arc { + self.snapshot_storage.clone() + } + + pub fn put_hashes(&self, key: HashValue, accumulator_info: SyncFlexiDagSnapshot) -> Result<()> { + self.snapshot_storage.put(key, accumulator_info) + } + + pub fn get_hashes_by_hash( + &self, + hash: HashValue, + ) -> std::result::Result, anyhow::Error> { + self.snapshot_storage.get(hash) + } +} diff --git a/storage/src/tests/test_dag.rs b/storage/src/tests/test_dag.rs new file mode 100644 index 0000000000..159c905ba2 --- /dev/null +++ b/storage/src/tests/test_dag.rs @@ -0,0 +1,347 @@ +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; +use starcoin_config::RocksdbConfig; +use starcoin_crypto::HashValue; + +use crate::{ + cache_storage::CacheStorage, db_storage::DBStorage, flexi_dag::SyncFlexiDagSnapshot, + storage::StorageInstance, Storage, Store, SyncFlexiDagStore, +}; +use anyhow::{Ok, Result}; + +trait SyncFlexiDagManager { + fn insert_hashes(&self, hashes: Vec) -> Result; + fn query_by_hash(&self, hash: HashValue) -> Result>; + fn fork(&mut self, accumulator_info: AccumulatorInfo) -> Result<()>; + fn get_hash_by_position(&self, position: u64) -> Result>; + fn get_accumulator_info(&self) -> AccumulatorInfo; +} + +struct SyncFlexiDagManagerImp { + flexi_dag_storage: Box, + accumulator: MerkleAccumulator, +} + +impl SyncFlexiDagManagerImp { + pub fn new() -> Self { + let flexi_dag_storage = Storage::new(StorageInstance::new_cache_and_db_instance( + CacheStorage::default(), + DBStorage::new( + starcoin_config::temp_dir().as_ref(), + RocksdbConfig::default(), + None, + ) + .unwrap(), + )) + .unwrap(); + let accumulator = MerkleAccumulator::new_empty( + flexi_dag_storage + .get_accumulator_store(starcoin_accumulator::node::AccumulatorStoreType::SyncDag), + ); + SyncFlexiDagManagerImp { + flexi_dag_storage: Box::new(flexi_dag_storage), + accumulator, + } + } + + fn hash_for_hashes(mut hashes: Vec) -> HashValue { + hashes.sort(); + HashValue::sha3_256_of(&hashes.into_iter().fold([].to_vec(), |mut collect, hash| { + collect.extend(hash.into_iter()); + collect + })) + } +} + +impl SyncFlexiDagManager for SyncFlexiDagManagerImp { + fn insert_hashes(&self, mut child_hashes: Vec) -> Result { + child_hashes.sort(); + let accumulator_key = Self::hash_for_hashes(child_hashes.clone()); + self.accumulator.append(&[accumulator_key])?; + self.flexi_dag_storage.put_hashes( + accumulator_key, + SyncFlexiDagSnapshot { + child_hashes, + accumulator_info: self.get_accumulator_info(), + }, + )?; + Ok(accumulator_key) + } + + fn query_by_hash(&self, hash: HashValue) -> Result> { + 
self.flexi_dag_storage.query_by_hash(hash) + } + + fn fork(&mut self, accumulator_info: AccumulatorInfo) -> Result<()> { + self.accumulator = self.accumulator.fork(Some(accumulator_info)); + Ok(()) + } + + fn get_hash_by_position(&self, position: u64) -> Result> { + self.accumulator.get_leaf(position) + } + + fn get_accumulator_info(&self) -> AccumulatorInfo { + self.accumulator.get_info() + } +} + +#[test] +fn test_syn_dag_accumulator_insert_and_find() { + let syn_accumulator = SyncFlexiDagManagerImp::new(); + let genesis = HashValue::sha3_256_of(b"genesis"); + let b = HashValue::sha3_256_of(b"b"); + let c = HashValue::sha3_256_of(b"c"); + let d = HashValue::sha3_256_of(b"d"); + let e = HashValue::sha3_256_of(b"e"); + let f = HashValue::sha3_256_of(b"f"); + let h = HashValue::sha3_256_of(b"h"); + let i = HashValue::sha3_256_of(b"i"); + let j = HashValue::sha3_256_of(b"j"); + let k = HashValue::sha3_256_of(b"k"); + let l = HashValue::sha3_256_of(b"l"); + let m = HashValue::sha3_256_of(b"m"); + + let genesis_key = syn_accumulator.insert_hashes([genesis].to_vec()).unwrap(); + let layer1 = syn_accumulator + .insert_hashes([b, c, d, e].to_vec()) + .unwrap(); + let layer2 = syn_accumulator + .insert_hashes([f, h, i, k].to_vec()) + .unwrap(); + let layer3 = syn_accumulator + .insert_hashes([j, m, k, l].to_vec()) + .unwrap(); + let layer4 = syn_accumulator.insert_hashes([j, m, l].to_vec()).unwrap(); + + assert_eq!(5, syn_accumulator.get_accumulator_info().get_num_leaves()); + + assert_eq!( + genesis_key, + syn_accumulator.get_hash_by_position(0).unwrap().unwrap() + ); + assert_eq!( + layer1, + syn_accumulator.get_hash_by_position(1).unwrap().unwrap() + ); + assert_eq!( + layer2, + syn_accumulator.get_hash_by_position(2).unwrap().unwrap() + ); + assert_eq!( + layer3, + syn_accumulator.get_hash_by_position(3).unwrap().unwrap() + ); + assert_eq!( + layer4, + syn_accumulator.get_hash_by_position(4).unwrap().unwrap() + ); + + assert_eq!( + [genesis].to_vec(), + syn_accumulator + .query_by_hash(syn_accumulator.get_hash_by_position(0).unwrap().unwrap()) + .unwrap() + .unwrap() + .child_hashes + ); + assert_eq!( + { + let mut v = [b, c, d, e].to_vec(); + v.sort(); + v + }, + syn_accumulator + .query_by_hash(syn_accumulator.get_hash_by_position(1).unwrap().unwrap()) + .unwrap() + .unwrap() + .child_hashes + ); + assert_eq!( + { + let mut v = [f, h, i, k].to_vec(); + v.sort(); + v + }, + syn_accumulator + .query_by_hash(syn_accumulator.get_hash_by_position(2).unwrap().unwrap()) + .unwrap() + .unwrap() + .child_hashes + ); + assert_eq!( + { + let mut v = [j, m, k, l].to_vec(); + v.sort(); + v + }, + syn_accumulator + .query_by_hash(syn_accumulator.get_hash_by_position(3).unwrap().unwrap()) + .unwrap() + .unwrap() + .child_hashes + ); + assert_eq!( + { + let mut v = [j, m, l].to_vec(); + v.sort(); + v + }, + syn_accumulator + .query_by_hash(syn_accumulator.get_hash_by_position(4).unwrap().unwrap()) + .unwrap() + .unwrap() + .child_hashes + ); +} + +#[test] +fn test_syn_dag_accumulator_fork() { + let mut syn_accumulator = SyncFlexiDagManagerImp::new(); + let syn_accumulator_target = SyncFlexiDagManagerImp::new(); + + let genesis = HashValue::sha3_256_of(b"genesis"); + let b = HashValue::sha3_256_of(b"b"); + let c = HashValue::sha3_256_of(b"c"); + let d = HashValue::sha3_256_of(b"d"); + let e = HashValue::sha3_256_of(b"e"); + let f = HashValue::sha3_256_of(b"f"); + let h = HashValue::sha3_256_of(b"h"); + let i = HashValue::sha3_256_of(b"i"); + let j = HashValue::sha3_256_of(b"j"); + let k = 
HashValue::sha3_256_of(b"k"); + let l = HashValue::sha3_256_of(b"l"); + let m = HashValue::sha3_256_of(b"m"); + let p = HashValue::sha3_256_of(b"p"); + let v = HashValue::sha3_256_of(b"v"); + + let _genesis_key = syn_accumulator.insert_hashes([genesis].to_vec()).unwrap(); + let _genesis_key = syn_accumulator_target + .insert_hashes([genesis].to_vec()) + .unwrap(); + + let layer1 = syn_accumulator + .insert_hashes([b, c, d, e].to_vec()) + .unwrap(); + let layer2 = syn_accumulator + .insert_hashes([f, h, i, k].to_vec()) + .unwrap(); + let layer3 = syn_accumulator + .insert_hashes([j, m, k, l].to_vec()) + .unwrap(); + let layer4 = syn_accumulator.insert_hashes([j, m, l].to_vec()).unwrap(); + + let target_layer1 = syn_accumulator_target + .insert_hashes([b, c, d, e].to_vec()) + .unwrap(); + let target_layer2 = syn_accumulator_target + .insert_hashes([f, h, i, k].to_vec()) + .unwrap(); + let target_layer3 = syn_accumulator_target + .insert_hashes([j, m, k, l].to_vec()) + .unwrap(); + let target_layer4 = syn_accumulator_target + .insert_hashes([p, m, v].to_vec()) + .unwrap(); + let target_layer5 = syn_accumulator_target + .insert_hashes([p, v].to_vec()) + .unwrap(); + + assert_eq!(layer1, target_layer1); + assert_eq!(layer2, target_layer2); + assert_eq!(layer3, target_layer3); + + assert_ne!(layer4, target_layer4); + assert_ne!( + syn_accumulator.get_accumulator_info().get_num_leaves(), + syn_accumulator_target + .get_accumulator_info() + .get_num_leaves() + ); + assert_ne!( + syn_accumulator.get_accumulator_info(), + syn_accumulator_target.get_accumulator_info() + ); + + let info = syn_accumulator_target + .query_by_hash(layer3) + .unwrap() + .unwrap() + .accumulator_info; + + println!("{:?}", info); + assert_eq!( + layer3, + syn_accumulator.get_hash_by_position(3).unwrap().unwrap() + ); + + syn_accumulator.fork(info).unwrap(); + + assert_eq!( + layer3, + syn_accumulator.get_hash_by_position(3).unwrap().unwrap() + ); + + let new_layer4 = syn_accumulator.insert_hashes([p, m, v].to_vec()).unwrap(); + let new_layer5 = syn_accumulator.insert_hashes([p, v].to_vec()).unwrap(); + + assert_eq!(new_layer4, target_layer4); + assert_eq!(new_layer5, target_layer5); + assert_eq!( + syn_accumulator.get_accumulator_info().get_num_leaves(), + syn_accumulator_target + .get_accumulator_info() + .get_num_leaves() + ); + assert_eq!( + syn_accumulator.get_accumulator_info(), + syn_accumulator_target.get_accumulator_info() + ); +} + +#[test] +fn test_accumulator_temp() { + let flexi_dag_storage = Storage::new(StorageInstance::new_cache_and_db_instance( + CacheStorage::default(), + DBStorage::new( + starcoin_config::temp_dir().as_ref(), + RocksdbConfig::default(), + None, + ) + .unwrap(), + )) + .unwrap(); + let mut accumulator = MerkleAccumulator::new_empty( + flexi_dag_storage + .get_accumulator_store(starcoin_accumulator::node::AccumulatorStoreType::SyncDag), + ); + let _hash1 = accumulator.append(&[HashValue::sha3_256_of(b"a")]).unwrap(); + let _hash2 = accumulator.append(&[HashValue::sha3_256_of(b"b")]).unwrap(); + let _hash3 = accumulator.append(&[HashValue::sha3_256_of(b"c")]).unwrap(); + let accumulator_info = accumulator.get_info(); + let _hash4 = accumulator.append(&[HashValue::sha3_256_of(b"d")]).unwrap(); + + assert_eq!( + HashValue::sha3_256_of(b"b"), + accumulator.get_leaf(1).unwrap().unwrap() + ); + accumulator.flush().unwrap(); + accumulator = accumulator.fork(Some(accumulator_info)); + let _hash5 = accumulator.append(&[HashValue::sha3_256_of(b"e")]).unwrap(); + + assert_eq!( + 
HashValue::sha3_256_of(b"b"), + accumulator.get_leaf(1).unwrap().unwrap() + ); + assert_eq!( + HashValue::sha3_256_of(b"c"), + accumulator.get_leaf(2).unwrap().unwrap() + ); + assert_eq!( + HashValue::sha3_256_of(b"e"), + accumulator.get_leaf(3).unwrap().unwrap() + ); + assert_ne!( + HashValue::sha3_256_of(b"d"), + accumulator.get_leaf(3).unwrap().unwrap() + ); +} From 13e3ba1d42eaa0df940146c44398f87c7eaa8742 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 24 Jul 2023 17:03:48 +0800 Subject: [PATCH 03/30] merge from starcoin dag --- storage/src/lib.rs | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/storage/src/lib.rs b/storage/src/lib.rs index ba3be0d55c..07031ddc7b 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -1,40 +1,41 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::accumulator::{ - AccumulatorStorage, BlockAccumulatorStorage, TransactionAccumulatorStorage, +use crate::{ + accumulator::{AccumulatorStorage, BlockAccumulatorStorage, TransactionAccumulatorStorage}, + block::BlockStorage, + block_info::{BlockInfoStorage, BlockInfoStore}, + chain_info::ChainInfoStorage, + contract_event::ContractEventStorage, + state_node::StateStorage, + storage::{CodecKVStore, CodecWriteBatch, ColumnFamilyName, StorageInstance}, }; -use crate::block::BlockStorage; -use crate::block_info::{BlockInfoStorage, BlockInfoStore}; -use crate::chain_info::ChainInfoStorage; -use crate::contract_event::ContractEventStorage; -use crate::state_node::StateStorage; -use crate::storage::{CodecKVStore, CodecWriteBatch, ColumnFamilyName, StorageInstance}; //use crate::table_info::{TableInfoStorage, TableInfoStore}; -use crate::transaction::TransactionStorage; -use crate::transaction_info::{TransactionInfoHashStorage, TransactionInfoStorage}; +use crate::{ + transaction::TransactionStorage, + transaction_info::{TransactionInfoHashStorage, TransactionInfoStorage}, +}; use anyhow::{bail, format_err, Error, Result}; use flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage, SyncFlexiDagStorage}; use network_p2p_types::peer_id::PeerId; use num_enum::{IntoPrimitive, TryFromPrimitive}; use once_cell::sync::Lazy; -use starcoin_accumulator::node::AccumulatorStoreType; -use starcoin_accumulator::AccumulatorTreeStore; +use starcoin_accumulator::{node::AccumulatorStoreType, AccumulatorTreeStore}; use starcoin_crypto::HashValue; use starcoin_state_store_api::{StateNode, StateNodeStore}; -use starcoin_types::contract_event::ContractEvent; -use starcoin_types::startup_info::{ChainInfo, ChainStatus, SnapshotRange}; -use starcoin_types::transaction::{RichTransactionInfo, Transaction}; use starcoin_types::{ block::{Block, BlockBody, BlockHeader, BlockInfo}, - startup_info::StartupInfo, + contract_event::ContractEvent, + startup_info::{ChainInfo, ChainStatus, SnapshotRange, StartupInfo}, + transaction::{RichTransactionInfo, Transaction}, }; //use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; -use std::collections::BTreeMap; -use std::fmt::{Debug, Display, Formatter}; -use std::sync::Arc; -pub use upgrade::BARNARD_HARD_FORK_HASH; -pub use upgrade::BARNARD_HARD_FORK_HEIGHT; +use std::{ + collections::BTreeMap, + fmt::{Debug, Display, Formatter}, + sync::Arc, +}; +pub use upgrade::{BARNARD_HARD_FORK_HASH, BARNARD_HARD_FORK_HEIGHT}; pub mod accumulator; pub mod batch; From e5444e1ce7ad69f804822be79e0cbd3e5530d99f Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 25 Jul 2023 
18:20:14 +0800 Subject: [PATCH 04/30] add dag block --- Cargo.lock | 1503 +++++++++++++++++++--------- Cargo.toml | 5 + chain/Cargo.toml | 6 + chain/api/src/message.rs | 4 + chain/service/src/chain_service.rs | 6 + chain/src/lib.rs | 1 + network-rpc/api/Cargo.toml | 1 + network-rpc/api/src/lib.rs | 17 + network-rpc/src/rpc.rs | 26 +- storage/src/chain_info/mod.rs | 16 + storage/src/lib.rs | 16 + storage/src/upgrade.rs | 1 - types/Cargo.toml | 1 + types/src/lib.rs | 1 + vm/types/src/lib.rs | 1 + 15 files changed, 1139 insertions(+), 466 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 37c29e4192..db04cd6a0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -207,7 +207,7 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "csv", "elasticsearch", @@ -219,10 +219,10 @@ dependencies = [ "serde 1.0.152", "serde_json", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-rpc-api", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "tokio", ] @@ -656,6 +656,16 @@ dependencies = [ "serde 1.0.152", ] +[[package]] +name = "bcs-ext" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "bcs", + "serde 1.0.152", +] + [[package]] name = "bech32" version = "0.9.1" @@ -674,33 +684,33 @@ version = "1.13.5" dependencies = [ "anyhow", "criterion", - "forkable-jellyfish-merkle", + "forkable-jellyfish-merkle 1.13.5", "futures 0.3.26", "futures-timer", - "network-api", + "network-api 1.13.5", "parking_lot 0.12.1", "pprof", "proptest", "rand 0.8.5", "rand_core 0.6.4", - "starcoin-account-api", - "starcoin-accumulator", + "starcoin-account-api 1.13.5", + "starcoin-accumulator 1.13.5", "starcoin-chain", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-executor-benchmark", "starcoin-genesis", - "starcoin-logger", - "starcoin-service-registry", - "starcoin-state-store-api", + "starcoin-logger 1.13.5", + "starcoin-service-registry 1.13.5", + "starcoin-state-store-api 1.13.5", "starcoin-state-tree", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-transaction-builder", - "starcoin-types", + "starcoin-types 1.13.5", "starcoin-vm-runtime", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -1350,6 +1360,32 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "consensus" +version = "0.1.0" +source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" +dependencies = [ + "anyhow", + "consensus-types", + "database", + "ghostdag", + "parking_lot 0.12.1", + "reachability", + "starcoin-crypto", + "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", +] + +[[package]] +name = "consensus-types" +version = "0.1.0" +source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" +dependencies = [ + "itertools", + "serde 1.0.152", + "starcoin-crypto", + "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", +] + [[package]] name = "console" version = "0.15.5" @@ -1379,7 +1415,7 @@ name = "contrib-contracts" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", 
"ethereum-types", "hex", "rlp", @@ -1389,9 +1425,9 @@ dependencies = [ "starcoin-crypto", "starcoin-executor", "starcoin-state-api", - "starcoin-types", - "starcoin-vm-types", - "stdlib", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "stest", "tempfile", "test-helper", @@ -2043,6 +2079,28 @@ dependencies = [ "syn 1.0.107", ] +[[package]] +name = "database" +version = "0.1.0" +source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" +dependencies = [ + "bincode", + "consensus-types", + "faster-hex", + "indexmap", + "itertools", + "num_cpus", + "parking_lot 0.12.1", + "rand 0.8.5", + "rocksdb", + "serde 1.0.152", + "starcoin-config 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-crypto", + "starcoin-storage 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "thiserror", +] + [[package]] name = "datatest-stable" version = "0.1.1" @@ -2071,7 +2129,7 @@ version = "1.13.5" dependencies = [ "anyhow", "atomic-counter", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "csv", "futures 0.3.26", @@ -2083,22 +2141,22 @@ dependencies = [ "rayon", "serde 1.0.152", "serde_json", - "starcoin-account-api", - "starcoin-accumulator", + "starcoin-account-api 1.13.5", + "starcoin-accumulator 1.13.5", "starcoin-chain", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-resource-viewer", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-transaction-builder", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "tokio", ] @@ -2659,6 +2717,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" +[[package]] +name = "faster-hex" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51e2ce894d53b295cf97b05685aa077950ff3e8541af83217fc720a6437169f8" + [[package]] name = "fastrand" version = "1.9.0" @@ -2782,7 +2846,7 @@ dependencies = [ "anyhow", "backtrace", "bcs", - "bcs-ext", + "bcs-ext 1.13.5", "byteorder", "criterion", "hex", @@ -2796,7 +2860,27 @@ dependencies = [ "serde 1.0.152", "serde_bytes", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", + "thiserror", +] + +[[package]] +name = "forkable-jellyfish-merkle" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "backtrace", + "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "byteorder", + "hex", + "mirai-annotations", + "num-derive", + "num-traits 0.2.15", + "serde 1.0.152", + "serde_bytes", + "starcoin-crypto", + "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "thiserror", ] @@ -3034,7 +3118,7 @@ name = "genesis-nft-miner" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "hex", "jsonrpc-core-client", @@ 
-3043,8 +3127,8 @@ dependencies = [ "serde_json", "starcoin-crypto", "starcoin-rpc-api", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "tokio", ] @@ -3102,6 +3186,22 @@ dependencies = [ "textwrap 0.11.0", ] +[[package]] +name = "ghostdag" +version = "0.1.0" +source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" +dependencies = [ + "consensus-types", + "database", + "itertools", + "parking_lot 0.12.1", + "reachability", + "rocksdb", + "serde 1.0.152", + "starcoin-crypto", + "thiserror", +] + [[package]] name = "gimli" version = "0.27.2" @@ -4814,7 +4914,7 @@ name = "merkle-generator" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "csv", "hex", @@ -4823,7 +4923,7 @@ dependencies = [ "serde_json", "sha3", "starcoin-crypto", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -5387,7 +5487,7 @@ version = "1.13.5" dependencies = [ "anyhow", "bcs", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "datatest-stable 0.1.1", "difference", @@ -5412,17 +5512,17 @@ dependencies = [ "once_cell", "starcoin-account-provider", "starcoin-cmd", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-move-compiler", + "starcoin-logger 1.13.5", + "starcoin-move-compiler 1.13.5", "starcoin-rpc-api", "starcoin-rpc-client", "starcoin-transactional-test-harness", - "starcoin-types", + "starcoin-types 1.13.5", "starcoin-vm-runtime", - "starcoin-vm-types", - "stdlib", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "tempfile", "tokio", "vm-status-translator", @@ -5935,21 +6035,45 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", + "futures 0.3.26", + "hex", + "itertools", + "network-p2p-types 1.13.5", + "network-types 1.13.5", + "parking_lot 0.12.1", + "rand 0.8.5", + "schemars", + "serde 1.0.152", + "starcoin-crypto", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", + "starcoin-service-registry 1.13.5", + "starcoin-types 1.13.5", +] + +[[package]] +name = "network-api" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "async-trait", + "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "futures 0.3.26", "hex", "itertools", - "network-p2p-types", - "network-types", + "network-p2p-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "network-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "parking_lot 0.12.1", "rand 0.8.5", "schemars", "serde 1.0.152", "starcoin-crypto", - "starcoin-logger", - "starcoin-metrics", - "starcoin-service-registry", - "starcoin-types", + "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-metrics 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-service-registry 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", ] [[package]] @@ -5960,7 +6084,7 @@ dependencies = [ "async-std", 
"async-trait", "asynchronous-codec 0.5.0", - "bcs-ext", + "bcs-ext 1.13.5", "bitflags", "bs58 0.3.1", "bytes 1.4.0", @@ -5976,20 +6100,20 @@ dependencies = [ "linked_hash_set", "log 0.4.17", "lru 0.7.8", - "network-p2p-types", + "network-p2p-types 1.13.5", "once_cell", "parking_lot 0.12.1", "pin-project 0.4.30", "prometheus", "rand 0.8.5", - "sc-peerset", + "sc-peerset 1.13.5", "serde 1.0.152", "serde_json", "smallvec 1.10.0", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-metrics", - "starcoin-types", + "starcoin-metrics 1.13.5", + "starcoin-types 1.13.5", "stest", "tempfile", "thiserror", @@ -6005,14 +6129,14 @@ name = "network-p2p-core" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "futures 0.3.26", "log 0.4.17", "network-p2p-derive", - "network-p2p-types", + "network-p2p-types 1.13.5", "num_enum", "serde 1.0.152", - "starcoin-types", + "starcoin-types 1.13.5", "stest", ] @@ -6036,24 +6160,56 @@ dependencies = [ "derive_more", "libp2p", "rand 0.8.5", - "sc-peerset", + "sc-peerset 1.13.5", + "schemars", + "serde 1.0.152", + "serde_json", + "starcoin-crypto", + "starcoin-types 1.13.5", +] + +[[package]] +name = "network-p2p-types" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "bitflags", + "bytes 1.4.0", + "derive_more", + "libp2p", + "rand 0.8.5", + "sc-peerset 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "schemars", "serde 1.0.152", "serde_json", "starcoin-crypto", - "starcoin-types", + "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", +] + +[[package]] +name = "network-types" +version = "1.13.5" +dependencies = [ + "anyhow", + "network-p2p-types 1.13.5", + "schemars", + "serde 1.0.152", + "starcoin-crypto", + "starcoin-types 1.13.5", ] [[package]] name = "network-types" version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" dependencies = [ "anyhow", - "network-p2p-types", + "network-p2p-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "schemars", "serde 1.0.152", "starcoin-crypto", - "starcoin-types", + "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", ] [[package]] @@ -7752,6 +7908,22 @@ dependencies = [ "rand_core 0.3.1", ] +[[package]] +name = "reachability" +version = "0.1.0" +source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" +dependencies = [ + "consensus-types", + "database", + "itertools", + "parking_lot 0.12.1", + "rocksdb", + "serde 1.0.152", + "starcoin-crypto", + "starcoin-storage 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "thiserror", +] + [[package]] name = "read-write-set" version = "0.1.0" @@ -7916,7 +8088,7 @@ name = "resource-exporter" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "csv", "hex", @@ -7927,9 +8099,9 @@ dependencies = [ "starcoin-resource-viewer", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage", - "starcoin-types", - "starcoin-vm-types", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", 
+ "starcoin-vm-types 1.13.5", ] [[package]] @@ -8304,7 +8476,20 @@ dependencies = [ "log 0.4.17", "rand 0.8.5", "serde_json", - "sp-utils", + "sp-utils 1.13.5", + "wasm-timer", +] + +[[package]] +name = "sc-peerset" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "futures 0.3.26", + "libp2p", + "log 0.4.17", + "serde_json", + "sp-utils 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "wasm-timer", ] @@ -8498,7 +8683,7 @@ dependencies = [ name = "serde-helpers" version = "1.13.5" dependencies = [ - "bcs-ext", + "bcs-ext 1.13.5", "hex", "serde 1.0.152", "serde_bytes", @@ -8965,6 +9150,20 @@ dependencies = [ "simple-stopwatch", ] +[[package]] +name = "sp-utils" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "futures 0.3.26", + "futures-core", + "lazy_static 1.4.0", + "once_cell", + "parking_lot 0.12.1", + "prometheus", + "simple-stopwatch", +] + [[package]] name = "spin" version = "0.5.2" @@ -8993,7 +9192,7 @@ version = "1.13.5" dependencies = [ "anyhow", "bcs", - "bcs-ext", + "bcs-ext 1.13.5", "hex", "move-binary-format", "ordinal", @@ -9004,7 +9203,7 @@ dependencies = [ "starcoin-abi-resolver", "starcoin-abi-types", "starcoin-resource-viewer", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -9016,8 +9215,8 @@ dependencies = [ "serde_json", "starcoin-abi-types", "starcoin-resource-viewer", - "starcoin-vm-types", - "stdlib", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "test-helper", ] @@ -9032,7 +9231,7 @@ dependencies = [ "serde 1.0.152", "serde_bytes", "serde_json", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -9041,7 +9240,7 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", "futures 0.3.26", "hex", "parking_lot 0.12.1", @@ -9049,13 +9248,13 @@ dependencies = [ "rand_core 0.6.4", "serde 1.0.152", "serde_json", - "starcoin-account-api", - "starcoin-config", + "starcoin-account-api 1.13.5", + "starcoin-config 1.13.5", "starcoin-crypto", "starcoin-decrypt", - "starcoin-logger", - "starcoin-storage", - "starcoin-types", + "starcoin-logger 1.13.5", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", "tempfile", ] @@ -9065,7 +9264,29 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", + "futures 0.3.26", + "hex", + "rand 0.8.5", + "rand_core 0.6.4", + "schemars", + "serde 1.0.152", + "serde_bytes", + "serde_json", + "starcoin-crypto", + "starcoin-service-registry 1.13.5", + "starcoin-types 1.13.5", + "thiserror", +] + +[[package]] +name = "starcoin-account-api" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "async-trait", + "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "futures 0.3.26", "hex", "rand 0.8.5", @@ -9075,8 +9296,8 @@ dependencies = [ "serde_bytes", "serde_json", "starcoin-crypto", - "starcoin-service-registry", - "starcoin-types", + "starcoin-service-registry 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-types 1.13.5 
(git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "thiserror", ] @@ -9086,11 +9307,11 @@ version = "1.13.5" dependencies = [ "anyhow", "starcoin-account", - "starcoin-account-api", - "starcoin-config", + "starcoin-account-api 1.13.5", + "starcoin-config 1.13.5", "starcoin-crypto", "starcoin-rpc-client", - "starcoin-types", + "starcoin-types 1.13.5", ] [[package]] @@ -9099,16 +9320,16 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", "futures 0.3.26", "starcoin-account", - "starcoin-account-api", + "starcoin-account-api 1.13.5", "starcoin-chain-notify", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-service-registry", - "starcoin-types", + "starcoin-logger 1.13.5", + "starcoin-service-registry 1.13.5", + "starcoin-types 1.13.5", "stest", "tempfile", "tokio", @@ -9119,7 +9340,7 @@ name = "starcoin-accumulator" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "byteorder", "itertools", "lru 0.7.8", @@ -9133,7 +9354,26 @@ dependencies = [ "schemars", "serde 1.0.152", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", +] + +[[package]] +name = "starcoin-accumulator" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "byteorder", + "itertools", + "lru 0.7.8", + "mirai-annotations", + "once_cell", + "parking_lot 0.12.1", + "schemars", + "serde 1.0.152", + "starcoin-crypto", + "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", ] [[package]] @@ -9142,25 +9382,25 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", "futures 0.3.26", "hex", - "network-api", + "network-api 1.13.5", "starcoin-chain", "starcoin-chain-api", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-metrics", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", "starcoin-network", "starcoin-network-rpc-api", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-sync", "starcoin-sync-api", - "starcoin-time-service", + "starcoin-time-service 1.13.5", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types", + "starcoin-types 1.13.5", "stest", "tokio", ] @@ -9170,34 +9410,37 @@ name = "starcoin-chain" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", + "consensus", + "consensus-types", + "database", "proptest", "proptest-derive", "rand 0.8.5", "rand_core 0.6.4", - "sp-utils", - "starcoin-account-api", - "starcoin-accumulator", + "sp-utils 1.13.5", + "starcoin-account-api 1.13.5", + "starcoin-accumulator 1.13.5", "starcoin-chain-api", "starcoin-chain-mock", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-open-block", "starcoin-resource-viewer", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage", - "starcoin-time-service", + "starcoin-storage 1.13.5", + "starcoin-time-service 1.13.5", "starcoin-transaction-builder", - "starcoin-types", - 
"starcoin-vm-types", - "stdlib", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "stest", "test-helper", "thiserror", @@ -9211,18 +9454,18 @@ dependencies = [ "anyhow", "async-trait", "futures 0.3.26", - "network-api", + "network-api 1.13.5", "rand 0.8.5", "rand_core 0.6.4", "serde 1.0.152", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-crypto", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-statedb", - "starcoin-time-service", - "starcoin-types", - "starcoin-vm-types", + "starcoin-time-service 1.13.5", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "thiserror", ] @@ -9232,27 +9475,27 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", "futures 0.3.26", "futures-timer", "parking_lot 0.12.1", "proptest", "proptest-derive", - "starcoin-account-api", - "starcoin-accumulator", + "starcoin-account-api 1.13.5", + "starcoin-accumulator 1.13.5", "starcoin-chain", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-open-block", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage", - "starcoin-types", - "starcoin-vm-types", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "thiserror", ] @@ -9262,10 +9505,10 @@ version = "1.13.5" dependencies = [ "anyhow", "starcoin-crypto", - "starcoin-logger", - "starcoin-service-registry", - "starcoin-storage", - "starcoin-types", + "starcoin-logger 1.13.5", + "starcoin-service-registry 1.13.5", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", ] [[package]] @@ -9280,15 +9523,15 @@ dependencies = [ "serde 1.0.152", "starcoin-chain", "starcoin-chain-api", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-service-registry", + "starcoin-logger 1.13.5", + "starcoin-service-registry 1.13.5", "starcoin-state-api", - "starcoin-storage", - "starcoin-types", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", "starcoin-vm-runtime", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", "stest", "test-helper", "thiserror", @@ -9300,18 +9543,18 @@ name = "starcoin-cmd" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "crossbeam-channel", - "forkable-jellyfish-merkle", + "forkable-jellyfish-merkle 1.13.5", "futures 0.3.26", "hex", "itertools", "move-command-line-common", "move-errmapgen", - "network-api", - "network-p2p-types", - "network-types", + "network-api 1.13.5", + "network-p2p-types 1.13.5", + "network-types 1.13.5", "rand 0.8.5", "scmd", "serde 1.0.152", @@ -9319,17 +9562,17 @@ dependencies = [ "starcoin-abi-decoder", "starcoin-abi-resolver", "starcoin-abi-types", - "starcoin-account-api", + "starcoin-account-api 1.13.5", "starcoin-account-provider", "starcoin-chain-api", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", - "starcoin-move-compiler", + "starcoin-logger 1.13.5", + "starcoin-move-compiler 1.13.5", "starcoin-move-explain", "starcoin-network-rpc-api", "starcoin-node", @@ -9337,15 +9580,15 @@ dependencies = [ "starcoin-resource-viewer", "starcoin-rpc-api", "starcoin-rpc-client", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-sync-api", 
"starcoin-transaction-builder", "starcoin-txpool-api", - "starcoin-types", + "starcoin-types 1.13.5", "starcoin-vm-runtime", - "starcoin-vm-types", - "stdlib", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "stest", "test-helper", "tokio", @@ -9363,9 +9606,50 @@ dependencies = [ "hex", "libc", "names", - "network-api", - "network-p2p-types", - "network-types", + "network-api 1.13.5", + "network-p2p-types 1.13.5", + "network-types 1.13.5", + "num_cpus", + "num_enum", + "once_cell", + "parking_lot 0.12.1", + "rand 0.8.5", + "rand_core 0.6.4", + "schemars", + "serde 1.0.152", + "serde_json", + "starcoin-account-api 1.13.5", + "starcoin-crypto", + "starcoin-gas 1.13.5", + "starcoin-gas-algebra-ext 1.13.5", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", + "starcoin-system 1.13.5", + "starcoin-time-service 1.13.5", + "starcoin-types 1.13.5", + "starcoin-uint 1.13.5", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", + "tempfile", + "thiserror", + "toml", +] + +[[package]] +name = "starcoin-config" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "clap 3.2.23", + "dirs-next", + "git-version", + "hex", + "libc", + "names", + "network-api 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "network-p2p-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "network-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "num_cpus", "num_enum", "once_cell", @@ -9375,18 +9659,18 @@ dependencies = [ "schemars", "serde 1.0.152", "serde_json", - "starcoin-account-api", + "starcoin-account-api 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "starcoin-crypto", - "starcoin-gas", - "starcoin-gas-algebra-ext", - "starcoin-logger", - "starcoin-metrics", - "starcoin-system", - "starcoin-time-service", - "starcoin-types", - "starcoin-uint", - "starcoin-vm-types", - "stdlib", + "starcoin-gas 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-gas-algebra-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-metrics 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-system 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-time-service 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-uint 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "stdlib 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "tempfile", "thiserror", "toml", @@ -9410,11 +9694,11 @@ dependencies = [ "sha3", "starcoin-chain-api", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-state-api", - 
"starcoin-time-service", - "starcoin-types", - "starcoin-vm-types", + "starcoin-time-service 1.13.5", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "thiserror", ] @@ -9453,13 +9737,13 @@ name = "starcoin-dataformat-generator" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "serde 1.0.152", "serde-reflection 0.3.2", "serde_yaml", "starcoin-crypto", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -9481,18 +9765,18 @@ name = "starcoin-dev" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "starcoin-abi-decoder", "starcoin-abi-resolver", "starcoin-abi-types", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-resource-viewer", "starcoin-rpc-api", "starcoin-state-api", "starcoin-statedb", "starcoin-vm-runtime", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", "thiserror", "vm-status-translator", ] @@ -9502,7 +9786,7 @@ name = "starcoin-executor" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "hex", "log 0.4.17", "move-transactional-test-runner", @@ -9511,20 +9795,20 @@ dependencies = [ "serde_json", "starcoin-abi-resolver", "starcoin-abi-types", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-resource-viewer", "starcoin-state-api", "starcoin-state-tree", "starcoin-statedb", "starcoin-transaction-builder", - "starcoin-types", + "starcoin-types 1.13.5", "starcoin-vm-runtime", - "starcoin-vm-types", - "stdlib", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "stest", "tempfile", "test-helper", @@ -9539,19 +9823,19 @@ dependencies = [ "itertools", "rand 0.8.5", "rayon", - "starcoin-accumulator", - "starcoin-config", + "starcoin-accumulator 1.13.5", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-transaction-builder", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -9568,15 +9852,15 @@ dependencies = [ "rust-embed", "serde 1.0.152", "serde_json", - "starcoin-account-api", - "starcoin-config", + "starcoin-account-api 1.13.5", + "starcoin-config 1.13.5", "starcoin-crypto", "starcoin-executor", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-rpc-client", "starcoin-state-api", "starcoin-transaction-builder", - "starcoin-types", + "starcoin-types 1.13.5", "tiny_http", "tokio", "tokio-executor 0.2.0-alpha.6", @@ -9605,14 +9889,45 @@ dependencies = [ "move-stdlib", "move-table-extension", "move-vm-types", - "starcoin-gas-algebra-ext", - "starcoin-logger", - "starcoin-natives", + "starcoin-gas-algebra-ext 1.13.5", + "starcoin-logger 1.13.5", + "starcoin-natives 1.13.5", +] + +[[package]] +name = "starcoin-gas" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "clap 3.2.23", + "move-binary-format", + "move-core-types", + "move-stdlib", + "move-table-extension", + "move-vm-types", + "starcoin-gas-algebra-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-logger 1.13.5 
(git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-natives 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", +] + +[[package]] +name = "starcoin-gas-algebra-ext" +version = "1.13.5" +dependencies = [ + "move-binary-format", + "move-core-types", + "move-stdlib", + "move-table-extension", + "move-vm-test-utils", + "move-vm-types", + "serde 1.0.152", + "starcoin-natives 1.13.5", ] [[package]] name = "starcoin-gas-algebra-ext" version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" dependencies = [ "move-binary-format", "move-core-types", @@ -9621,7 +9936,7 @@ dependencies = [ "move-vm-test-utils", "move-vm-types", "serde 1.0.152", - "starcoin-natives", + "starcoin-natives 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", ] [[package]] @@ -9637,15 +9952,15 @@ dependencies = [ "serde 1.0.152", "serde_json", "starcoin-account", - "starcoin-account-api", + "starcoin-account-api 1.13.5", "starcoin-chain", "starcoin-chain-mock", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", "starcoin-genesis", - "starcoin-logger", - "starcoin-storage", - "starcoin-types", + "starcoin-logger 1.13.5", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", ] [[package]] @@ -9653,26 +9968,26 @@ name = "starcoin-genesis" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "include_dir", "once_cell", "serde 1.0.152", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-chain", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-transaction-builder", - "starcoin-types", + "starcoin-types 1.13.5", "starcoin-vm-runtime", - "starcoin-vm-types", - "stdlib", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "stest", "thiserror", ] @@ -9691,9 +10006,9 @@ dependencies = [ "serde 1.0.152", "serde_json", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-rpc-api", - "starcoin-types", + "starcoin-types 1.13.5", "tokio", ] @@ -9716,6 +10031,26 @@ dependencies = [ "slog-term", ] +[[package]] +name = "starcoin-logger" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "arc-swap", + "chrono", + "lazy_static 1.4.0", + "log 0.4.17", + "log4rs", + "once_cell", + "parking_lot 0.12.1", + "schemars", + "serde 1.0.152", + "slog", + "slog-async", + "slog-term", +] + [[package]] name = "starcoin-metrics" version = "1.13.5" @@ -9726,8 +10061,21 @@ dependencies = [ "prometheus", "psutil", "serde_json", - "starcoin-logger", - "timeout-join-handler", + "starcoin-logger 1.13.5", + "timeout-join-handler 1.13.5", +] + +[[package]] +name = "starcoin-metrics" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "prometheus", + "psutil", + "serde_json", + "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "timeout-join-handler 1.13.5 
(git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", ] [[package]] @@ -9735,39 +10083,39 @@ name = "starcoin-miner" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "futures 0.3.26", "futures-timer", "hex", "once_cell", "parking_lot 0.12.1", "serde 1.0.152", - "starcoin-account-api", + "starcoin-account-api 1.13.5", "starcoin-account-service", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-chain", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", - "starcoin-metrics", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", "starcoin-network-rpc", "starcoin-network-rpc-api", "starcoin-node", "starcoin-open-block", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-service", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-sync", "starcoin-sync-api", - "starcoin-time-service", + "starcoin-time-service 1.13.5", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "test-helper", "thiserror", @@ -9800,18 +10148,18 @@ dependencies = [ "rust-argon2", "serde 1.0.152", "serde_json", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-miner", "starcoin-miner-client-api", "starcoin-rpc-api", "starcoin-rpc-client", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-stratum", - "starcoin-time-service", - "starcoin-types", + "starcoin-time-service 1.13.5", + "starcoin-types 1.13.5", "stest", "thiserror", "tokio", @@ -9825,7 +10173,7 @@ dependencies = [ "async-trait", "dyn-clone", "futures 0.3.26", - "starcoin-types", + "starcoin-types 1.13.5", ] [[package]] @@ -9840,21 +10188,40 @@ dependencies = [ "petgraph 0.5.1", "regex", "starcoin-crypto", - "starcoin-logger", - "starcoin-vm-types", + "starcoin-logger 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "tempfile", "walkdir", ] +[[package]] +name = "starcoin-move-compiler" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "move-binary-format", + "move-command-line-common", + "move-compiler", + "once_cell", + "petgraph 0.5.1", + "regex", + "starcoin-crypto", + "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "tempfile", + "walkdir", +] + [[package]] name = "starcoin-move-explain" version = "1.13.5" dependencies = [ - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "move-core-types", - "stdlib", + "stdlib 1.13.5", ] [[package]] @@ -9907,7 +10274,33 @@ dependencies = [ "ripemd160", "smallvec 1.10.0", "starcoin-crypto", - "starcoin-uint", + "starcoin-uint 1.13.5", + "tiny-keccak", + "walkdir", +] + +[[package]] +name = "starcoin-natives" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "arrayref", + "libsecp256k1", + "log 0.4.17", + "move-binary-format", + "move-command-line-common", + "move-core-types", + "move-docgen", + 
"move-errmapgen", + "move-prover", + "move-stdlib", + "move-vm-runtime", + "move-vm-types", + "num_enum", + "ripemd160", + "smallvec 1.10.0", + "starcoin-crypto", + "starcoin-uint 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "tiny-keccak", "walkdir", ] @@ -9919,7 +10312,7 @@ dependencies = [ "anyhow", "async-std", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", "bitflags", "bytes 1.4.0", "derive_more", @@ -9929,26 +10322,26 @@ dependencies = [ "hex", "log 0.4.17", "lru 0.7.8", - "network-api", + "network-api 1.13.5", "network-p2p", "network-p2p-core", - "network-p2p-types", + "network-p2p-types 1.13.5", "parking_lot 0.12.1", "prometheus", "rand 0.8.5", - "sc-peerset", + "sc-peerset 1.13.5", "serde 1.0.152", "serde_json", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-metrics", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", "starcoin-network-rpc", "starcoin-network-rpc-api", - "starcoin-service-registry", - "starcoin-storage", + "starcoin-service-registry 1.13.5", + "starcoin-storage 1.13.5", "starcoin-txpool-api", - "starcoin-types", + "starcoin-types 1.13.5", "stest", "tempfile", "test-helper", @@ -9962,41 +10355,41 @@ version = "1.13.5" dependencies = [ "anyhow", "api-limiter", - "bcs-ext", + "bcs-ext 1.13.5", "bytes 1.4.0", "futures 0.3.26", "futures-timer", "hex", - "network-api", + "network-api 1.13.5", "network-p2p-core", "network-p2p-derive", - "network-p2p-types", + "network-p2p-types 1.13.5", "once_cell", "prometheus", "serde 1.0.152", "serde_json", - "starcoin-account-api", - "starcoin-accumulator", + "starcoin-account-api 1.13.5", + "starcoin-accumulator 1.13.5", "starcoin-block-relayer", "starcoin-chain-service", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-miner", "starcoin-network", "starcoin-network-rpc-api", "starcoin-node", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-service", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "test-helper", "tokio", @@ -10007,21 +10400,22 @@ name = "starcoin-network-rpc-api" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", + "consensus-types", "futures 0.3.26", "network-p2p-core", "network-p2p-derive", - "network-p2p-types", - "network-types", + "network-p2p-types 1.13.5", + "network-types 1.13.5", "once_cell", "serde 1.0.152", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-state-api", "starcoin-state-tree", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -10037,22 +10431,22 @@ dependencies = [ "chrono", "futures 0.3.26", "futures-timer", - "network-api", + "network-api 1.13.5", "network-p2p-core", "serde_json", - "starcoin-account-api", + "starcoin-account-api 1.13.5", "starcoin-account-service", "starcoin-block-relayer", "starcoin-chain-notify", "starcoin-chain-service", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", - "starcoin-metrics", + "starcoin-logger 
1.13.5", + "starcoin-metrics 1.13.5", "starcoin-miner", "starcoin-miner-client", "starcoin-network", @@ -10061,21 +10455,21 @@ dependencies = [ "starcoin-node-api", "starcoin-rpc-client", "starcoin-rpc-server", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-service", "starcoin-statedb", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-stratum", "starcoin-sync", "starcoin-sync-api", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types", + "starcoin-types 1.13.5", "starcoin-vm-runtime", "stest", "thiserror", - "timeout-join-handler", + "timeout-join-handler 1.13.5", "tokio", ] @@ -10088,14 +10482,14 @@ dependencies = [ "backtrace", "futures 0.3.26", "serde 1.0.152", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-genesis", - "starcoin-logger", - "starcoin-service-registry", - "starcoin-storage", - "starcoin-types", + "starcoin-logger 1.13.5", + "starcoin-service-registry 1.13.5", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", "stest", "thiserror", ] @@ -10106,19 +10500,19 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", "futures 0.3.26", "parking_lot 0.12.1", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-chain-api", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage", - "starcoin-types", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", "stest", ] @@ -10128,19 +10522,19 @@ version = "1.13.5" dependencies = [ "anyhow", "async-std", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "futures 0.3.26", "network-p2p", - "network-p2p-types", - "network-types", - "starcoin-config", + "network-p2p-types 1.13.5", + "network-types 1.13.5", + "starcoin-config 1.13.5", "starcoin-crypto", "starcoin-genesis", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-network", - "starcoin-storage", - "starcoin-types", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", ] [[package]] @@ -10149,14 +10543,14 @@ version = "1.13.5" dependencies = [ "anyhow", "clap 3.2.23", - "sp-utils", + "sp-utils 1.13.5", "starcoin-chain", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-genesis", - "starcoin-logger", - "starcoin-storage", - "starcoin-types", - "starcoin-vm-types", + "starcoin-logger 1.13.5", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -10169,7 +10563,7 @@ dependencies = [ "move-core-types", "serde 1.0.152", "serde_json", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -10178,7 +10572,7 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "futures 0.3.26", "hex", @@ -10188,9 +10582,9 @@ dependencies = [ "jsonrpc-pubsub", "jsonrpc-server-utils", "move-core-types", - "network-api", - "network-p2p-types", - "network-types", + "network-api 1.13.5", + "network-p2p-types 1.13.5", + "network-types 1.13.5", "openrpc-derive", "openrpc-schema", "schemars", @@ -10199,19 +10593,19 @@ dependencies = [ "serde_json", "starcoin-abi-decoder", "starcoin-abi-types", - "starcoin-account-api", - "starcoin-accumulator", + "starcoin-account-api 1.13.5", + "starcoin-accumulator 1.13.5", "starcoin-chain-api", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-resource-viewer", - 
"starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-sync-api", "starcoin-txpool-api", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "thiserror", "vm-status-translator", ] @@ -10224,7 +10618,7 @@ dependencies = [ "actix-rt", "anyhow", "async-std", - "bcs-ext", + "bcs-ext 1.13.5", "futures 0.3.26", "futures-timer", "hex", @@ -10235,27 +10629,27 @@ dependencies = [ "jsonrpc-pubsub", "jsonrpc-server-utils", "log 0.4.17", - "network-api", - "network-p2p-types", - "network-types", + "network-api 1.13.5", + "network-p2p-types 1.13.5", + "network-types 1.13.5", "parity-tokio-ipc", "parking_lot 0.12.1", "serde 1.0.152", "serde_json", "starcoin-abi-types", - "starcoin-account-api", - "starcoin-config", + "starcoin-account-api 1.13.5", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-rpc-api", "starcoin-rpc-server", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-tree", "starcoin-sync-api", "starcoin-txpool-api", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "test-helper", "thiserror", @@ -10272,9 +10666,9 @@ dependencies = [ "once_cell", "rand 0.8.5", "serde_json", - "starcoin-config", - "starcoin-logger", - "starcoin-metrics", + "starcoin-config 1.13.5", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", "starcoin-rpc-api", "stest", "thiserror", @@ -10289,7 +10683,7 @@ dependencies = [ "anyhow", "api-limiter", "bcs", - "bcs-ext", + "bcs-ext 1.13.5", "dashmap", "futures 0.3.26", "futures-channel", @@ -10305,30 +10699,30 @@ dependencies = [ "jsonrpc-tcp-server", "jsonrpc-ws-server", "log 0.4.17", - "network-api", + "network-api 1.13.5", "network-p2p-core", - "network-p2p-types", - "network-types", + "network-p2p-types 1.13.5", + "network-types 1.13.5", "parking_lot 0.12.1", "serde 1.0.152", "serde_json", "starcoin-abi-decoder", "starcoin-abi-resolver", "starcoin-abi-types", - "starcoin-account-api", + "starcoin-account-api 1.13.5", "starcoin-account-service", "starcoin-chain", "starcoin-chain-mock", "starcoin-chain-notify", "starcoin-chain-service", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", - "starcoin-metrics", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", "starcoin-miner", "starcoin-network", "starcoin-node-api", @@ -10336,19 +10730,19 @@ dependencies = [ "starcoin-rpc-api", "starcoin-rpc-client", "starcoin-rpc-middleware", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-service", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-sync-api", "starcoin-transaction-builder", "starcoin-txpool", "starcoin-txpool-api", "starcoin-txpool-mock-service", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "test-helper", "thiserror", @@ -10376,21 +10770,40 @@ dependencies = [ "tokio", ] +[[package]] +name = "starcoin-service-registry" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "actix", + "actix-rt", + "anyhow", + "async-trait", + "futures 0.3.26", + "futures-timer", + "log 0.4.17", + 
"once_cell", + "schemars", + "serde 1.0.152", + "thiserror", + "tokio", +] + [[package]] name = "starcoin-state-api" version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext", - "forkable-jellyfish-merkle", + "bcs-ext 1.13.5", + "forkable-jellyfish-merkle 1.13.5", "once_cell", "serde 1.0.152", "starcoin-crypto", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-tree", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -10400,16 +10813,16 @@ dependencies = [ "anyhow", "async-trait", "futures 0.3.26", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-service-registry", + "starcoin-logger 1.13.5", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage", - "starcoin-types", - "starcoin-vm-types", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "test-helper", "tokio", @@ -10420,7 +10833,18 @@ name = "starcoin-state-store-api" version = "1.13.5" dependencies = [ "anyhow", - "forkable-jellyfish-merkle", + "forkable-jellyfish-merkle 1.13.5", + "serde 1.0.152", + "starcoin-crypto", +] + +[[package]] +name = "starcoin-state-store-api" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "forkable-jellyfish-merkle 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "serde 1.0.152", "starcoin-crypto", ] @@ -10430,17 +10854,17 @@ name = "starcoin-state-tree" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", - "forkable-jellyfish-merkle", + "bcs-ext 1.13.5", + "forkable-jellyfish-merkle 1.13.5", "parking_lot 0.12.1", "serde 1.0.152", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-state-store-api", - "starcoin-storage", - "starcoin-types", - "starcoin-vm-types", + "starcoin-logger 1.13.5", + "starcoin-state-store-api 1.13.5", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -10448,17 +10872,17 @@ name = "starcoin-statedb" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", - "forkable-jellyfish-merkle", + "bcs-ext 1.13.5", + "forkable-jellyfish-merkle 1.13.5", "lru 0.7.8", "parking_lot 0.12.1", "serde 1.0.152", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-state-api", "starcoin-state-tree", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "thiserror", ] @@ -10467,14 +10891,14 @@ name = "starcoin-storage" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "byteorder", "chrono", "coarsetime", - "forkable-jellyfish-merkle", + "forkable-jellyfish-merkle 1.13.5", "lru 0.7.8", - "network-p2p-types", - "network-types", + "network-p2p-types 1.13.5", + "network-types 1.13.5", "num_enum", "once_cell", "parking_lot 0.12.1", @@ -10483,19 +10907,50 @@ dependencies = [ "rand 0.8.5", "rocksdb", "serde 1.0.152", - "starcoin-accumulator", - "starcoin-config", + "starcoin-accumulator 1.13.5", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-metrics", - "starcoin-state-store-api", - "starcoin-types", - "starcoin-uint", - "starcoin-vm-types", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", + 
"starcoin-state-store-api 1.13.5", + "starcoin-types 1.13.5", + "starcoin-uint 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "thiserror", ] +[[package]] +name = "starcoin-storage" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "byteorder", + "chrono", + "coarsetime", + "forkable-jellyfish-merkle 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "lru 0.7.8", + "network-p2p-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "network-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "num_enum", + "once_cell", + "parking_lot 0.12.1", + "rocksdb", + "serde 1.0.152", + "starcoin-accumulator 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-config 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-crypto", + "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-metrics 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-state-store-api 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-uint 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "thiserror", +] + [[package]] name = "starcoin-stratum" version = "1.13.5" @@ -10511,12 +10966,12 @@ dependencies = [ "jsonrpc-tcp-server", "serde 1.0.152", "serde_json", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-miner", - "starcoin-service-registry", - "starcoin-types", + "starcoin-service-registry 1.13.5", + "starcoin-types 1.13.5", "stest", ] @@ -10527,52 +10982,52 @@ dependencies = [ "anyhow", "async-std", "async-trait", - "bcs-ext", - "forkable-jellyfish-merkle", + "bcs-ext 1.13.5", + "forkable-jellyfish-merkle 1.13.5", "futures 0.3.26", "futures-retry", "futures-timer", "hex", "itertools", - "network-api", + "network-api 1.13.5", "network-p2p-core", "parking_lot 0.12.1", "pin-project 0.4.30", "pin-utils", "rand 0.8.5", - "starcoin-account-api", - "starcoin-accumulator", + "starcoin-account-api 1.13.5", + "starcoin-accumulator 1.13.5", "starcoin-block-relayer", "starcoin-chain", "starcoin-chain-api", "starcoin-chain-mock", "starcoin-chain-service", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", - "starcoin-metrics", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", "starcoin-miner", "starcoin-network", "starcoin-network-rpc", "starcoin-network-rpc-api", "starcoin-node", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-service", "starcoin-state-tree", "starcoin-statedb", - 
"starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-sync-api", - "starcoin-time-service", + "starcoin-time-service 1.13.5", "starcoin-transaction-builder", "starcoin-txpool", "starcoin-txpool-api", "starcoin-txpool-mock-service", - "starcoin-types", - "starcoin-vm-types", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", "stest", "stream-task", "sysinfo", @@ -10587,14 +11042,14 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "network-api", + "network-api 1.13.5", "schemars", "serde 1.0.152", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-service-registry", - "starcoin-types", + "starcoin-logger 1.13.5", + "starcoin-service-registry 1.13.5", + "starcoin-types 1.13.5", "stream-task", ] @@ -10606,9 +11061,27 @@ dependencies = [ "systemstat", ] +[[package]] +name = "starcoin-system" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "systemstat", +] + +[[package]] +name = "starcoin-time-service" +version = "1.13.5" +dependencies = [ + "log 0.4.17", + "serde 1.0.152", +] + [[package]] name = "starcoin-time-service" version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" dependencies = [ "log 0.4.17", "serde 1.0.152", @@ -10619,12 +11092,12 @@ name = "starcoin-transaction-builder" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", - "starcoin-config", + "bcs-ext 1.13.5", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-logger", - "starcoin-vm-types", - "stdlib", + "starcoin-logger 1.13.5", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "stest", ] @@ -10635,7 +11108,7 @@ dependencies = [ "anyhow", "async-trait", "bcs", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "dashmap", "datatest-stable 0.1.3", @@ -10660,9 +11133,9 @@ dependencies = [ "serde 1.0.152", "serde_json", "starcoin-abi-decoder", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-chain-api", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", "starcoin-dev", "starcoin-genesis", @@ -10673,11 +11146,11 @@ dependencies = [ "starcoin-state-api", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage", - "starcoin-types", + "starcoin-storage 1.13.5", + "starcoin-types 1.13.5", "starcoin-vm-runtime", - "starcoin-vm-types", - "stdlib", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "tempfile", "tokio", ] @@ -10690,16 +11163,16 @@ dependencies = [ "clap 3.2.23", "ctrlc", "futures 0.3.26", - "starcoin-account-api", - "starcoin-config", + "starcoin-account-api 1.13.5", + "starcoin-config 1.13.5", "starcoin-crypto", "starcoin-executor", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-rpc-api", "starcoin-rpc-client", "starcoin-state-api", "starcoin-transaction-builder", - "starcoin-types", + "starcoin-types 1.13.5", "tokio", ] @@ -10709,12 +11182,12 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "forkable-jellyfish-merkle", + "forkable-jellyfish-merkle 1.13.5", "futures 0.3.26", "futures-channel", "linked-hash-map", "log 0.4.17", - "network-api", + "network-api 1.13.5", "parking_lot 0.12.1", "proptest", "proptest-derive", @@ -10722,22 +11195,22 @@ dependencies = [ "rand_core 0.6.4", "serde 1.0.152", "serde_derive", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", "starcoin-executor", 
"starcoin-genesis", - "starcoin-logger", - "starcoin-metrics", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", "starcoin-open-block", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-transaction-builder", "starcoin-txpool-api", - "starcoin-types", - "stdlib", + "starcoin-types 1.13.5", + "stdlib 1.13.5", "stest", "tempfile", "test-helper", @@ -10756,7 +11229,7 @@ dependencies = [ "schemars", "serde 1.0.152", "starcoin-crypto", - "starcoin-types", + "starcoin-types 1.13.5", ] [[package]] @@ -10768,7 +11241,7 @@ dependencies = [ "futures-channel", "starcoin-crypto", "starcoin-txpool-api", - "starcoin-types", + "starcoin-types 1.13.5", "stest", "tokio", ] @@ -10778,10 +11251,11 @@ name = "starcoin-types" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "byteorder", "bytes 1.4.0", - "forkable-jellyfish-merkle", + "consensus-types", + "forkable-jellyfish-merkle 1.13.5", "hex", "num_enum", "proptest", @@ -10791,10 +11265,34 @@ dependencies = [ "schemars", "serde 1.0.152", "serde_json", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-crypto", - "starcoin-uint", - "starcoin-vm-types", + "starcoin-uint 1.13.5", + "starcoin-vm-types 1.13.5", + "thiserror", +] + +[[package]] +name = "starcoin-types" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "byteorder", + "bytes 1.4.0", + "forkable-jellyfish-merkle 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "hex", + "num_enum", + "rand 0.8.5", + "rand_core 0.6.4", + "schemars", + "serde 1.0.152", + "serde_json", + "starcoin-accumulator 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-crypto", + "starcoin-uint 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "thiserror", ] @@ -10802,12 +11300,23 @@ dependencies = [ name = "starcoin-uint" version = "1.13.5" dependencies = [ - "bcs-ext", + "bcs-ext 1.13.5", "hex", "serde 1.0.152", "serde_json", "starcoin-crypto", - "starcoin-types", + "starcoin-types 1.13.5", + "uint 0.9.5", +] + +[[package]] +name = "starcoin-uint" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "hex", + "serde 1.0.152", + "starcoin-crypto", "uint 0.9.5", ] @@ -10816,7 +11325,7 @@ name = "starcoin-vm-runtime" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "move-core-types", "move-stdlib", "move-table-extension", @@ -10826,16 +11335,16 @@ dependencies = [ "rand 0.8.5", "rand_core 0.6.4", "serde 1.0.152", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-crypto", - "starcoin-gas", - "starcoin-gas-algebra-ext", - "starcoin-logger", - "starcoin-metrics", - "starcoin-natives", - "starcoin-types", - "starcoin-vm-types", - "stdlib", + "starcoin-gas 1.13.5", + "starcoin-gas-algebra-ext 1.13.5", + "starcoin-logger 1.13.5", + 
"starcoin-metrics 1.13.5", + "starcoin-natives 1.13.5", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "tracing", ] @@ -10844,10 +11353,10 @@ name = "starcoin-vm-types" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "bech32", "chrono", - "forkable-jellyfish-merkle", + "forkable-jellyfish-merkle 1.13.5", "hex", "log 0.4.17", "mirai-annotations", @@ -10866,10 +11375,41 @@ dependencies = [ "serde 1.0.152", "serde_bytes", "serde_json", - "starcoin-accumulator", + "starcoin-accumulator 1.13.5", "starcoin-crypto", - "starcoin-gas-algebra-ext", - "starcoin-time-service", + "starcoin-gas-algebra-ext 1.13.5", + "starcoin-time-service 1.13.5", +] + +[[package]] +name = "starcoin-vm-types" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "bech32", + "chrono", + "forkable-jellyfish-merkle 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "hex", + "log 0.4.17", + "mirai-annotations", + "move-binary-format", + "move-bytecode-verifier", + "move-core-types", + "move-ir-types", + "move-table-extension", + "move-vm-types", + "num_enum", + "once_cell", + "rand 0.8.5", + "schemars", + "serde 1.0.152", + "serde_bytes", + "serde_json", + "starcoin-accumulator 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-crypto", + "starcoin-gas-algebra-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", ] [[package]] @@ -10883,7 +11423,7 @@ name = "stdlib" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext", + "bcs-ext 1.13.5", "clap 3.2.23", "datatest-stable 0.1.3", "fs_extra", @@ -10899,8 +11439,35 @@ dependencies = [ "simplelog", "starcoin-crypto", "starcoin-framework", - "starcoin-move-compiler", - "starcoin-vm-types", + "starcoin-move-compiler 1.13.5", + "starcoin-vm-types 1.13.5", + "tempfile", + "walkdir", +] + +[[package]] +name = "stdlib" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "anyhow", + "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "clap 3.2.23", + "fs_extra", + "include_dir", + "itertools", + "log 0.4.17", + "move-bytecode-verifier", + "move-compiler", + "move-prover", + "once_cell", + "serde 1.0.152", + "sha2 0.10.6", + "simplelog", + "starcoin-crypto", + "starcoin-framework", + "starcoin-move-compiler 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", "tempfile", "walkdir", ] @@ -10914,9 +11481,9 @@ dependencies = [ "anyhow", "futures 0.3.26", "log 0.4.17", - "starcoin-logger", + "starcoin-logger 1.13.5", "stest-macro", - "timeout-join-handler", + "timeout-join-handler 1.13.5", "tokio", ] @@ -10959,7 +11526,7 @@ dependencies = [ "pin-utils", "schemars", "serde 1.0.152", - "starcoin-logger", + "starcoin-logger 1.13.5", "stest", "thiserror", "tokio", @@ -11303,51 +11870,51 @@ dependencies = [ "anyhow", "async-trait", "backtrace", - "bcs-ext", + "bcs-ext 
1.13.5", "futures 0.3.26", "futures-timer", "hex", "move-ir-compiler", - "network-api", + "network-api 1.13.5", "network-p2p-core", - "network-p2p-types", + "network-p2p-types 1.13.5", "rand 0.8.5", "serde 1.0.152", "serde_json", - "starcoin-account-api", + "starcoin-account-api 1.13.5", "starcoin-account-service", "starcoin-block-relayer", "starcoin-chain", "starcoin-chain-notify", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", "starcoin-executor", "starcoin-genesis", - "starcoin-logger", - "starcoin-metrics", + "starcoin-logger 1.13.5", + "starcoin-metrics 1.13.5", "starcoin-miner", - "starcoin-move-compiler", + "starcoin-move-compiler 1.13.5", "starcoin-network", "starcoin-network-rpc", "starcoin-network-rpc-api", "starcoin-node", "starcoin-node-api", "starcoin-rpc-server", - "starcoin-service-registry", + "starcoin-service-registry 1.13.5", "starcoin-state-api", "starcoin-state-service", "starcoin-statedb", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-sync", "starcoin-sync-api", "starcoin-transaction-builder", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types", - "starcoin-vm-types", - "stdlib", + "starcoin-types 1.13.5", + "starcoin-vm-types 1.13.5", + "stdlib 1.13.5", "stest", "thiserror", "tokio", @@ -11367,15 +11934,15 @@ dependencies = [ "serde 1.0.152", "serde_bytes", "serde_json", - "starcoin-account-api", + "starcoin-account-api 1.13.5", "starcoin-account-provider", "starcoin-chain", "starcoin-cmd", - "starcoin-config", + "starcoin-config 1.13.5", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", - "starcoin-logger", + "starcoin-logger 1.13.5", "starcoin-miner", "starcoin-network", "starcoin-node", @@ -11383,12 +11950,12 @@ dependencies = [ "starcoin-rpc-client", "starcoin-rpc-server", "starcoin-state-api", - "starcoin-storage", + "starcoin-storage 1.13.5", "starcoin-transaction-builder", "starcoin-txpool", - "starcoin-types", + "starcoin-types 1.13.5", "starcoin-vm-runtime", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", ] [[package]] @@ -11525,6 +12092,14 @@ dependencies = [ "thiserror", ] +[[package]] +name = "timeout-join-handler" +version = "1.13.5" +source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" +dependencies = [ + "thiserror", +] + [[package]] name = "tint" version = "1.0.1" @@ -11857,7 +12432,7 @@ dependencies = [ "serde-generate", "serde-reflection 0.3.2", "serde_yaml", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", "tempfile", "textwrap 0.14.2", "which", @@ -12297,7 +12872,7 @@ dependencies = [ "schemars", "serde 1.0.152", "starcoin-move-explain", - "starcoin-vm-types", + "starcoin-vm-types 1.13.5", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index be08e8be86..d191bcef88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -513,6 +513,11 @@ walkdir = "2.3.1" wasm-timer = "0.2" which = "4.1.0" zeroize = "1.3.0" +database = { git = "https://github.com/starcoinorg/smolstc", rev = "167d700a0f99ba929cd6d156dac77859306f32da" } +consensus = { git = "https://github.com/starcoinorg/smolstc", rev = "167d700a0f99ba929cd6d156dac77859306f32da" } +ghostdag = { git = "https://github.com/starcoinorg/smolstc", rev = "167d700a0f99ba929cd6d156dac77859306f32da" } +reachability = { git = "https://github.com/starcoinorg/smolstc", rev = "167d700a0f99ba929cd6d156dac77859306f32da" } +consensus-types = { git = "https://github.com/starcoinorg/smolstc", rev = 
"167d700a0f99ba929cd6d156dac77859306f32da" } [profile.release.package] starcoin-service-registry.debug = 1 diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 5fd1e9acf6..f3921e1ccb 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -23,6 +23,9 @@ starcoin-types = { package = "starcoin-types", workspace = true } starcoin-vm-types = { workspace = true } starcoin-storage = { workspace = true } thiserror = { workspace = true } +database = { workspace = true } +consensus = { workspace = true } +consensus-types = { workspace = true } [dev-dependencies] proptest = { workspace = true } @@ -39,6 +42,9 @@ stdlib = { workspace = true } stest = { workspace = true } test-helper = { workspace = true } tokio = { features = ["full"], workspace = true } +database = { workspace = true } +consensus = { workspace = true } +consensus-types = { workspace = true } [features] default = [] diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs index d4144fe9a0..b6541f5920 100644 --- a/chain/api/src/message.rs +++ b/chain/api/src/message.rs @@ -60,6 +60,10 @@ pub enum ChainRequest { access_path: Option, }, GetBlockInfos(Vec), + GetDagAccumulatorLeaves { + start_index: u64, + batch_size: u64, + } } impl ServiceRequest for ChainRequest { diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index f7b32799d1..7933b91715 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -3,6 +3,7 @@ use anyhow::{format_err, Error, Result}; use starcoin_chain::BlockChain; +use starcoin_chain::dag_chain::DagBlockChain; use starcoin_chain_api::message::{ChainRequest, ChainResponse}; use starcoin_chain_api::{ ChainReader, ChainWriter, ReadableChainService, TransactionInfoWithProof, @@ -32,6 +33,8 @@ use std::sync::Arc; /// A Chain reader service to provider Reader API. 
pub struct ChainReaderService { inner: ChainReaderServiceInner, + + // dag_chain: DagBlockChain, } impl ChainReaderService { @@ -43,6 +46,7 @@ impl ChainReaderService { ) -> Result { Ok(Self { inner: ChainReaderServiceInner::new(config, startup_info, storage, vm_metrics)?, + // dag_chain: DagBlockChain::new(config, storage, vm_metrics)?, }) } } @@ -232,6 +236,8 @@ impl ServiceHandler for ChainReaderService { ChainRequest::GetBlockInfos(ids) => Ok(ChainResponse::BlockInfoVec(Box::new( self.inner.get_block_infos(ids)?, ))), + _ => todo!() + // ChainRequest::GetDagAccumulatorLeaves(start_index, batch_size) => Ok(ChainResponse::HashValue(self.dag_)), } } } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 70c73faab9..ae60e05dd2 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 #![deny(clippy::integer_arithmetic)] mod chain; +pub mod dag_chain; pub mod verifier; pub use chain::BlockChain; pub use starcoin_chain_api::{ChainReader, ChainWriter}; diff --git a/network-rpc/api/Cargo.toml b/network-rpc/api/Cargo.toml index d49fa1e612..2fe399c8a0 100644 --- a/network-rpc/api/Cargo.toml +++ b/network-rpc/api/Cargo.toml @@ -15,6 +15,7 @@ starcoin-state-api = { workspace = true } starcoin-state-tree = { workspace = true } starcoin-types = { workspace = true } starcoin-vm-types = { workspace = true } +consensus-types = { workspace = true } [package] authors = { workspace = true } diff --git a/network-rpc/api/src/lib.rs b/network-rpc/api/src/lib.rs index 8188b72826..7dfb73bfe5 100644 --- a/network-rpc/api/src/lib.rs +++ b/network-rpc/api/src/lib.rs @@ -20,6 +20,7 @@ use starcoin_types::account_state::AccountState; use starcoin_types::block::{Block, BlockHeader, BlockInfo, BlockNumber}; use starcoin_types::transaction::{SignedUserTransaction, Transaction, TransactionInfo}; +pub mod dag_protocol; mod remote_chain_state; pub use network_p2p_core::RawRpcClient; @@ -286,6 +287,22 @@ pub trait NetworkRpc: Sized + Send + Sync + 'static { peer_id: PeerId, request: GetStateWithTableItemProof, ) -> BoxFuture>; + + fn get_dag_accumulator_leaves( + &self, + peer_id: PeerId, + req: dag_protocol::GetDagAccumulatorLeaves, + ) -> BoxFuture>>; + fn get_accumulator_leaf_detail( + &self, + peer_id: PeerId, + req: dag_protocol::GetTargetDagAccumulatorLeafDetail, + ) -> BoxFuture>>>; + fn get_dag_block_info( + &self, + peer_id: PeerId, + req: dag_protocol::GetSyncDagBlockInfo, + ) -> BoxFuture>>>; } #[derive(Debug, Serialize, Deserialize, Clone)] diff --git a/network-rpc/src/rpc.rs b/network-rpc/src/rpc.rs index e91c2f760d..7aca558e92 100644 --- a/network-rpc/src/rpc.rs +++ b/network-rpc/src/rpc.rs @@ -13,7 +13,7 @@ use starcoin_network_rpc_api::{ gen_server, BlockBody, GetAccountState, GetAccumulatorNodeByNodeHash, GetBlockHeadersByNumber, GetBlockIds, GetStateWithProof, GetStateWithTableItemProof, GetTxnsWithHash, GetTxnsWithSize, Ping, RpcRequest, MAX_BLOCK_HEADER_REQUEST_SIZE, MAX_BLOCK_INFO_REQUEST_SIZE, - MAX_BLOCK_REQUEST_SIZE, MAX_TXN_REQUEST_SIZE, + MAX_BLOCK_REQUEST_SIZE, MAX_TXN_REQUEST_SIZE, dag_protocol, }; use starcoin_service_registry::ServiceRef; use starcoin_state_api::{ChainStateAsyncService, StateWithProof, StateWithTableItemProof}; @@ -306,4 +306,28 @@ impl gen_server::NetworkRpc for NetworkRpcImpl { }; Box::pin(fut) } + + fn get_dag_accumulator_leaves( + &self, + peer_id: PeerId, + req: dag_protocol::GetDagAccumulatorLeaves, + ) -> BoxFuture>> { + todo!() + } + + fn get_accumulator_leaf_detail( + &self, + peer_id: PeerId, + req: 
dag_protocol::GetTargetDagAccumulatorLeafDetail, + ) -> BoxFuture>>> { + todo!() + } + + fn get_dag_block_info( + &self, + peer_id: PeerId, + req: dag_protocol::GetSyncDagBlockInfo, + ) -> BoxFuture>>> { + todo!() + } } diff --git a/storage/src/chain_info/mod.rs b/storage/src/chain_info/mod.rs index 3f193be3f0..c83ce383ff 100644 --- a/storage/src/chain_info/mod.rs +++ b/storage/src/chain_info/mod.rs @@ -28,6 +28,22 @@ impl ChainInfoStorage { const STORAGE_VERSION_KEY: &'static str = "storage_version"; const SNAPSHOT_RANGE_KEY: &'static str = "snapshot_height"; const BARNARD_HARD_FORK: &'static str = "barnard_hard_fork"; + const FLEXI_DAG_STARTUP_INFO_KEY: &'static str = "flexi_dag_startup_info"; + + pub fn get_flexi_dag_startup_info(&self) -> Result> { + self.get(Self::FLEXI_DAG_STARTUP_INFO_KEY.as_bytes()) + .and_then(|bytes| match bytes { + Some(bytes) => Ok(Some(bytes.try_into()?)), + None => Ok(None), + }) + } + + pub fn save_flexi_dag_startup_info(&self, startup_info: StartupInfo) -> Result<()> { + self.put_sync( + Self::FLEXI_DAG_STARTUP_INFO_KEY.as_bytes().to_vec(), + startup_info.try_into()?, + ) + } pub fn get_startup_info(&self) -> Result> { self.get(Self::STARTUP_INFO_KEY.as_bytes()) diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 07031ddc7b..8fd446d9c4 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -195,6 +195,11 @@ impl StorageVersion { } } +pub trait DagBlockStore { + fn get_flexi_dag_startup_info(&self) -> Result>; + fn save_flexi_dag_startup_info(&self, startup_info: StartupInfo) -> Result<()>; +} + pub trait BlockStore { fn get_startup_info(&self) -> Result>; fn save_startup_info(&self, startup_info: StartupInfo) -> Result<()>; @@ -377,6 +382,16 @@ impl Debug for Storage { } } +impl DagBlockStore for Storage { + fn get_flexi_dag_startup_info(&self) -> Result> { + self.chain_info_storage.get_flexi_dag_startup_info() + } + + fn save_flexi_dag_startup_info(&self, startup_info: StartupInfo) -> Result<()> { + self.chain_info_storage.save_flexi_dag_startup_info(startup_info) + } +} + impl BlockStore for Storage { fn get_startup_info(&self) -> Result> { self.chain_info_storage.get_startup_info() @@ -621,6 +636,7 @@ impl SyncFlexiDagStore for Storage { /// Chain storage define pub trait Store: StateNodeStore + + DagBlockStore + BlockStore + BlockInfoStore + TransactionStore diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index ecf2b323b1..ac27e111f9 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -164,7 +164,6 @@ impl DBUpgrade { } fn db_upgrade_v3_v4(_instance: &mut StorageInstance) -> Result<()> { - // https://github.com/facebook/rocksdb/issues/1295 Ok(()) } diff --git a/types/Cargo.toml b/types/Cargo.toml index 4e1301a8fb..67656e3387 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -18,6 +18,7 @@ starcoin-crypto = { workspace = true } starcoin-uint = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } +consensus-types = { workspace = true } [features] default = [] diff --git a/types/src/lib.rs b/types/src/lib.rs index ec49aa8bed..67ac309280 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -24,6 +24,7 @@ pub mod account_state; #[allow(clippy::too_many_arguments)] pub mod block; pub mod compact_block; +pub mod dag_block; pub mod block_metadata { pub use starcoin_vm_types::block_metadata::BlockMetadata; diff --git a/vm/types/src/lib.rs b/vm/types/src/lib.rs index ea86f45141..79775d65b9 100644 --- a/vm/types/src/lib.rs +++ b/vm/types/src/lib.rs @@ -6,6 +6,7 @@ mod 
language_storage_ext; pub mod account_address; pub mod gas_schedule; +pub mod dag_block_metadata; pub mod location { pub use move_ir_types::location::Loc; } From 2ecb3e0ea650a60a2bc5789f933956856af2fd86 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 25 Jul 2023 18:22:46 +0800 Subject: [PATCH 05/30] add dag block --- chain/src/dag_chain.rs | 60 ++ network-rpc/api/src/dag_protocol.rs | 46 ++ types/src/dag_block.rs | 945 ++++++++++++++++++++++++++++ vm/types/src/dag_block_metadata.rs | 146 +++++ 4 files changed, 1197 insertions(+) create mode 100644 chain/src/dag_chain.rs create mode 100644 network-rpc/api/src/dag_protocol.rs create mode 100644 types/src/dag_block.rs create mode 100644 vm/types/src/dag_block_metadata.rs diff --git a/chain/src/dag_chain.rs b/chain/src/dag_chain.rs new file mode 100644 index 0000000000..143c7b0020 --- /dev/null +++ b/chain/src/dag_chain.rs @@ -0,0 +1,60 @@ +use std::sync::Arc; + +use consensus::blockdag::BlockDAG; +use consensus_types::{blockhash::ORIGIN, header::Header}; +use database::prelude::{FlexiDagStorageConfig, FlexiDagStorage}; +use starcoin_accumulator::MerkleAccumulator; +use starcoin_config::NodeConfig; +use starcoin_crypto::HashValue; +use starcoin_executor::VMMetrics; +use starcoin_storage::{flexi_dag::{SyncFlexiDagSnapshotStorage, SyncFlexiDagStorage}, Store}; +use starcoin_types::{block::BlockHeader, dag_block::DagBlockHeader}; + + + +pub struct DagBlockChain { + dag: Option, + dag_block_accumulator: MerkleAccumulator, + accumulator_snapshot: Arc, +} + + +impl DagBlockChain { + pub fn new( + config: Arc, + storage: Arc, + vm_metrics: Option, + ) -> anyhow::Result { + todo!() + // initialize the dag + // let db_path = config.storage.dir(); + // let config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); + // let db = FlexiDagStorage::create_from_path(db_path, config)?; + // let dag = BlockDAG::new(Header::new(DagBlockHeader::random(), vec![HashValue::new(ORIGIN)]), 16, db); + + // // initialize the block accumulator + // let sync_flexi_dag_store = Arc::new(SyncFlexiDagStorage::new(storage,)?); + // let startup_info = match storage.get_flexi_dag_startup_info()? 
{ + // Some(startup_info) => startup_info, + // None => { + // return Ok(Self { + // dag: Some(dag), + // dag_block_accumulator: MerkleAccumulator::new_empty(sy), + // accumulator_snapshot: sync_flexi_dag_store.get_accumulator_storage(), + // }) + // } + // }; + + // let accmulator_info = sync_flexi_dag_store.get_snapshot_storage().get(startup_info.main); + + // Ok(Self { + // dag: Some(dag), + // dag_block_accumulator: MerkleAccumulator::new_with_info(accmulator_info, sync_flexi_dag_store.get_accumulator_storage()), + // accumulator_snapshot: Arc::new(SyncFlexiDagSnapshotStorage::new( + // storage, + // )?), + // }) + } + + +} \ No newline at end of file diff --git a/network-rpc/api/src/dag_protocol.rs b/network-rpc/api/src/dag_protocol.rs new file mode 100644 index 0000000000..792ae6b477 --- /dev/null +++ b/network-rpc/api/src/dag_protocol.rs @@ -0,0 +1,46 @@ +use consensus_types::header::Header; +use serde::{Deserialize, Serialize}; +use starcoin_accumulator::accumulator_info::AccumulatorInfo; +use starcoin_crypto::HashValue; + +#[derive(Clone, Debug, Hash, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize)] +pub struct RelationshipPair { + pub parent: HashValue, + pub child: HashValue, +} + +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)] +pub struct GetDagAccumulatorLeaves { + pub accumulator_leaf_index: u64, + pub batch_size: u64, +} + +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)] +pub struct TargetDagAccumulatorLeaf { + pub accumulator_root: HashValue, // accumulator info root + pub leaf_index: u64, +} + +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)] +pub struct GetTargetDagAccumulatorLeafDetail { + pub leaf_index: u64, + pub batch_size: u64, +} + +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)] +pub struct TargetDagAccumulatorLeafDetail { + pub accumulator_root: HashValue, + pub relationship_pair: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct GetSyncDagBlockInfo { + pub leaf_index: u64, + pub batch_size: u64, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct SyncDagBlockInfo { + pub block_headers: Vec
, + pub accumulator_info: AccumulatorInfo, +} diff --git a/types/src/dag_block.rs b/types/src/dag_block.rs new file mode 100644 index 0000000000..40abaf2383 --- /dev/null +++ b/types/src/dag_block.rs @@ -0,0 +1,945 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::account_address::AccountAddress; +use crate::block::BlockHeaderExtra; +use crate::block_metadata::BlockMetadata; +use crate::genesis_config::{ChainId, ConsensusStrategy}; +use crate::language_storage::CORE_CODE_ADDRESS; +use crate::transaction::SignedUserTransaction; +use crate::U256; +use bcs_ext::Sample; +use consensus_types::blockhash::ORIGIN; +use schemars::{self, JsonSchema}; +use serde::{Deserialize, Deserializer, Serialize}; +pub use starcoin_accumulator::accumulator_info::AccumulatorInfo; +use starcoin_crypto::hash::{ACCUMULATOR_PLACEHOLDER_HASH, SPARSE_MERKLE_PLACEHOLDER_HASH}; +use starcoin_crypto::{ + hash::{CryptoHash, CryptoHasher, PlainCryptoHash}, + HashValue, +}; +use starcoin_vm_types::account_config::genesis_address; +use starcoin_vm_types::dag_block_metadata::DagBlockMetadata; +use starcoin_vm_types::transaction::authenticator::AuthenticationKey; +use std::fmt::Formatter; + +/// block timestamp allowed future times +pub const ALLOWED_FUTURE_BLOCKTIME: u64 = 30000; // 30 second; + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, CryptoHasher, CryptoHash, JsonSchema)] +pub struct DagBlockHeader { + #[serde(skip)] + id: Option, + /// Parent hash. + parent_hash: Vec, + /// Block timestamp. + timestamp: u64, + /// Block author. + author: AccountAddress, + /// Block author auth key. + /// this field is deprecated + author_auth_key: Option, + /// The transaction accumulator root hash after executing this block. + txn_accumulator_root: HashValue, + /// The parent block info's block accumulator root hash. + block_accumulator_root: HashValue, + /// The last transaction state_root of this block after execute. + state_root: HashValue, + /// Gas used for contracts execution. + gas_used: u64, + /// Block difficulty + #[schemars(with = "String")] + difficulty: U256, + /// hash for block body + body_hash: HashValue, + /// The chain id + chain_id: ChainId, + /// Consensus nonce field. + nonce: u32, + /// block header extra + extra: BlockHeaderExtra, +} + +impl DagBlockHeader { + pub fn new( + parent_hash: Vec, + timestamp: u64, + author: AccountAddress, + txn_accumulator_root: HashValue, + block_accumulator_root: HashValue, + state_root: HashValue, + gas_used: u64, + difficulty: U256, + body_hash: HashValue, + chain_id: ChainId, + nonce: u32, + extra: BlockHeaderExtra, + ) -> DagBlockHeader { + Self::new_with_auth_key( + parent_hash, + timestamp, + author, + None, + txn_accumulator_root, + block_accumulator_root, + state_root, + gas_used, + difficulty, + body_hash, + chain_id, + nonce, + extra, + ) + } + + // the author_auth_key field is deprecated, but keep this fn for compat with old block. 
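+    // Note: like `new`, this constructor fills in every header field (including the
+    // deprecated `author_auth_key`) and then derives `id` from the header's crypto hash.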
+ fn new_with_auth_key( + parent_hash: Vec, + timestamp: u64, + author: AccountAddress, + author_auth_key: Option, + txn_accumulator_root: HashValue, + block_accumulator_root: HashValue, + state_root: HashValue, + gas_used: u64, + difficulty: U256, + body_hash: HashValue, + chain_id: ChainId, + nonce: u32, + extra: BlockHeaderExtra, + ) -> DagBlockHeader { + let mut header = DagBlockHeader { + id: None, + parent_hash, + block_accumulator_root, + timestamp, + author, + author_auth_key, + txn_accumulator_root, + state_root, + gas_used, + difficulty, + nonce, + body_hash, + chain_id, + extra, + }; + header.id = Some(header.crypto_hash()); + header + } + + pub fn as_pow_header_blob(&self) -> Vec { + let mut blob = Vec::new(); + let raw_header: RawDagBlockHeader = self.to_owned().into(); + let raw_header_hash = raw_header.crypto_hash(); + let mut diff = [0u8; 32]; + raw_header.difficulty.to_big_endian(&mut diff); + let extend_and_nonce = [0u8; 12]; + blob.extend_from_slice(raw_header_hash.to_vec().as_slice()); + blob.extend_from_slice(&extend_and_nonce); + blob.extend_from_slice(&diff); + blob + } + + pub fn id(&self) -> HashValue { + self.id.expect("DagBlockHeader id should be Some after init.") + } + + pub fn parent_hash(&self) -> Vec { + self.parent_hash.clone() + } + + pub fn timestamp(&self) -> u64 { + self.timestamp + } + + pub fn author(&self) -> AccountAddress { + self.author + } + + pub fn author_auth_key(&self) -> Option { + self.author_auth_key + } + + pub fn txn_accumulator_root(&self) -> HashValue { + self.txn_accumulator_root + } + + pub fn state_root(&self) -> HashValue { + self.state_root + } + + pub fn gas_used(&self) -> u64 { + self.gas_used + } + + pub fn nonce(&self) -> u32 { + self.nonce + } + + pub fn difficulty(&self) -> U256 { + self.difficulty + } + + pub fn block_accumulator_root(&self) -> HashValue { + self.block_accumulator_root + } + + pub fn body_hash(&self) -> HashValue { + self.body_hash + } + + pub fn chain_id(&self) -> ChainId { + self.chain_id + } + + pub fn extra(&self) -> &BlockHeaderExtra { + &self.extra + } + + pub fn is_genesis(&self) -> bool { + if self.parent_hash.len() == 1 { + return self.parent_hash[0] == HashValue::new(ORIGIN); + } + return false; + } + + pub fn genesis_block_header( + parent_hash: Vec, + timestamp: u64, + txn_accumulator_root: HashValue, + state_root: HashValue, + difficulty: U256, + body_hash: HashValue, + chain_id: ChainId, + ) -> Self { + Self::new( + parent_hash, + timestamp, + CORE_CODE_ADDRESS, + txn_accumulator_root, + *ACCUMULATOR_PLACEHOLDER_HASH, + state_root, + 0, + difficulty, + body_hash, + chain_id, + 0, + BlockHeaderExtra::default(), + ) + } + + pub fn random() -> Self { + Self::new( + vec![HashValue::random()], + rand::random(), + AccountAddress::random(), + HashValue::random(), + HashValue::random(), + HashValue::random(), + rand::random(), + U256::max_value(), + HashValue::random(), + ChainId::test(), + 0, + BlockHeaderExtra::new([0u8; 4]), + ) + } + + pub fn as_builder(&self) -> DagBlockHeaderBuilder { + DagBlockHeaderBuilder::new_with(self.clone()) + } +} + +impl<'de> Deserialize<'de> for DagBlockHeader { + fn deserialize(deserializer: D) -> Result>::Error> + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(rename = "DagBlockHeader")] + struct DagBlockHeaderData { + parent_hash: Vec, + timestamp: u64, + author: AccountAddress, + author_auth_key: Option, + txn_accumulator_root: HashValue, + block_accumulator_root: HashValue, + state_root: HashValue, + gas_used: u64, + difficulty: U256, + 
body_hash: HashValue, + chain_id: ChainId, + nonce: u32, + extra: BlockHeaderExtra, + } + + let header_data = DagBlockHeaderData::deserialize(deserializer)?; + let block_header = Self::new_with_auth_key( + header_data.parent_hash, + header_data.timestamp, + header_data.author, + header_data.author_auth_key, + header_data.txn_accumulator_root, + header_data.block_accumulator_root, + header_data.state_root, + header_data.gas_used, + header_data.difficulty, + header_data.body_hash, + header_data.chain_id, + header_data.nonce, + header_data.extra, + ); + Ok(block_header) + } +} + +impl Default for DagBlockHeader { + fn default() -> Self { + Self::new( + vec![HashValue::zero()], + 0, + AccountAddress::ZERO, + HashValue::zero(), + HashValue::zero(), + HashValue::zero(), + 0, + 0.into(), + HashValue::zero(), + ChainId::test(), + 0, + BlockHeaderExtra::new([0u8; 4]), + ) + } +} + +impl Sample for DagBlockHeader { + fn sample() -> Self { + Self::new( + vec![HashValue::zero()], + 1610110515000, + genesis_address(), + *ACCUMULATOR_PLACEHOLDER_HASH, + *ACCUMULATOR_PLACEHOLDER_HASH, + *SPARSE_MERKLE_PLACEHOLDER_HASH, + 0, + U256::from(1), + BlockBody::sample().crypto_hash(), + ChainId::test(), + 0, + BlockHeaderExtra::new([0u8; 4]), + ) + } +} + +#[allow(clippy::from_over_into)] +impl Into for DagBlockHeader { + fn into(self) -> RawDagBlockHeader { + RawDagBlockHeader { + parent_hash: self.parent_hash, + timestamp: self.timestamp, + author: self.author, + author_auth_key: self.author_auth_key, + accumulator_root: self.txn_accumulator_root, + parent_block_accumulator_root: self.block_accumulator_root, + state_root: self.state_root, + gas_used: self.gas_used, + difficulty: self.difficulty, + body_hash: self.body_hash, + chain_id: self.chain_id, + } + } +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] +pub struct RawDagBlockHeader { + /// Parent hash. + pub parent_hash: Vec, + /// Block timestamp. + pub timestamp: u64, + /// Block author. + pub author: AccountAddress, + /// Block author auth key. + /// this field is deprecated + pub author_auth_key: Option, + /// The transaction accumulator root hash after executing this block. + pub accumulator_root: HashValue, + /// The parent block accumulator root hash. + pub parent_block_accumulator_root: HashValue, + /// The last transaction state_root of this block after execute. + pub state_root: HashValue, + /// Gas used for contracts execution. 
+ pub gas_used: u64, + /// Block difficulty + pub difficulty: U256, + /// hash for block body + pub body_hash: HashValue, + /// The chain id + pub chain_id: ChainId, +} + +#[derive(Default)] +pub struct DagBlockHeaderBuilder { + buffer: DagBlockHeader, +} + +impl DagBlockHeaderBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn random() -> Self { + Self { + buffer: DagBlockHeader::random(), + } + } + + fn new_with(buffer: DagBlockHeader) -> Self { + Self { buffer } + } + + pub fn with_parent_hash(mut self, parent_hash: Vec) -> Self { + self.buffer.parent_hash = parent_hash; + self + } + + pub fn with_timestamp(mut self, timestamp: u64) -> Self { + self.buffer.timestamp = timestamp; + self + } + + pub fn with_author(mut self, author: AccountAddress) -> Self { + self.buffer.author = author; + self + } + + pub fn with_author_auth_key(mut self, author_auth_key: Option) -> Self { + self.buffer.author_auth_key = author_auth_key; + self + } + + pub fn with_accumulator_root(mut self, accumulator_root: HashValue) -> Self { + self.buffer.txn_accumulator_root = accumulator_root; + self + } + + pub fn with_parent_block_accumulator_root( + mut self, + parent_block_accumulator_root: HashValue, + ) -> Self { + self.buffer.block_accumulator_root = parent_block_accumulator_root; + self + } + + pub fn with_state_root(mut self, state_root: HashValue) -> Self { + self.buffer.state_root = state_root; + self + } + + pub fn with_gas_used(mut self, gas_used: u64) -> Self { + self.buffer.gas_used = gas_used; + self + } + + pub fn with_difficulty(mut self, difficulty: U256) -> Self { + self.buffer.difficulty = difficulty; + self + } + + pub fn with_body_hash(mut self, body_hash: HashValue) -> Self { + self.buffer.body_hash = body_hash; + self + } + + pub fn with_chain_id(mut self, chain_id: ChainId) -> Self { + self.buffer.chain_id = chain_id; + self + } + + pub fn with_nonce(mut self, nonce: u32) -> Self { + self.buffer.nonce = nonce; + self + } + + pub fn with_extra(mut self, extra: BlockHeaderExtra) -> Self { + self.buffer.extra = extra; + self + } + + pub fn build(mut self) -> DagBlockHeader { + self.buffer.id = Some(self.buffer.crypto_hash()); + self.buffer + } +} + +#[derive( + Default, Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, +)] +pub struct BlockBody { + /// The transactions in this block. + pub transactions: Vec, + /// uncles block header + pub uncles: Option>, +} + +impl BlockBody { + pub fn new(transactions: Vec, uncles: Option>) -> Self { + Self { + transactions, + uncles, + } + } + pub fn get_txn(&self, index: usize) -> Option<&SignedUserTransaction> { + self.transactions.get(index) + } + + /// Just for test + pub fn new_empty() -> BlockBody { + BlockBody { + transactions: Vec::new(), + uncles: None, + } + } + + pub fn hash(&self) -> HashValue { + self.crypto_hash() + } +} + +#[allow(clippy::from_over_into)] +impl Into for Vec { + fn into(self) -> BlockBody { + BlockBody { + transactions: self, + uncles: None, + } + } +} + +#[allow(clippy::from_over_into)] +impl Into> for BlockBody { + fn into(self) -> Vec { + self.transactions + } +} + +impl Sample for BlockBody { + fn sample() -> Self { + Self { + transactions: vec![], + uncles: None, + } + } +} + +/// A block, encoded as it is on the block chain. +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] +pub struct Block { + /// The header of this block. + pub header: DagBlockHeader, + /// The body of this block. 
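As a usage sketch for the DagBlockHeaderBuilder above (assuming the new module is exported as starcoin_types::dag_block; the field values are placeholders), the point to note is that build() is the step that fills in the id by hashing the assembled header:

    use starcoin_crypto::HashValue;
    use starcoin_types::dag_block::{DagBlockHeader, DagBlockHeaderBuilder};

    fn example_header(parents: Vec<HashValue>) -> DagBlockHeader {
        DagBlockHeaderBuilder::new()
            .with_parent_hash(parents)         // a DAG header may reference several parents
            .with_timestamp(1_610_110_515_000) // placeholder timestamp (milliseconds)
            .with_gas_used(0)
            .build()                           // computes and stores the header id
    }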
+ pub body: BlockBody, +} + +impl Block { + pub fn new(header: DagBlockHeader, body: B) -> Self + where + B: Into, + { + Block { + header, + body: body.into(), + } + } + + pub fn id(&self) -> HashValue { + self.header.id() + } + pub fn header(&self) -> &DagBlockHeader { + &self.header + } + pub fn transactions(&self) -> &[SignedUserTransaction] { + self.body.transactions.as_slice() + } + + pub fn uncles(&self) -> Option<&[DagBlockHeader]> { + match &self.body.uncles { + Some(uncles) => Some(uncles.as_slice()), + None => None, + } + } + + pub fn uncle_ids(&self) -> Vec { + self.uncles() + .map(|uncles| uncles.iter().map(|header| header.id()).collect()) + .unwrap_or_default() + } + + pub fn into_inner(self) -> (DagBlockHeader, BlockBody) { + (self.header, self.body) + } + + pub fn genesis_block( + parent_hash: Vec, + timestamp: u64, + accumulator_root: HashValue, + state_root: HashValue, + difficulty: U256, + genesis_txn: SignedUserTransaction, + ) -> Self { + let chain_id = genesis_txn.chain_id(); + let block_body = BlockBody::new(vec![genesis_txn], None); + let header = DagBlockHeader::genesis_block_header( + parent_hash, + timestamp, + accumulator_root, + state_root, + difficulty, + block_body.hash(), + chain_id, + ); + Self { + header, + body: block_body, + } + } + + pub fn to_metadata(&self, parent_gas_used: u64) -> DagBlockMetadata { + DagBlockMetadata::new( + self.header.parent_hash(), + self.header.timestamp, + self.header.author, + self.header.author_auth_key, + self.header.chain_id, + parent_gas_used, + ) + } +} + +impl std::fmt::Display for Block { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Block{{id:\"{}\", parent_id:\"{:?}\",", + self.id(), + self.header().parent_hash() + )?; + if let Some(uncles) = &self.body.uncles { + write!(f, "uncles:[")?; + for uncle in uncles { + write!(f, "\"{}\",", uncle.id())?; + } + write!(f, "],")?; + } + write!(f, "transactions:[")?; + for txn in &self.body.transactions { + write!(f, "\"{}\",", txn.id())?; + } + write!(f, "]}}") + } +} + +impl Sample for Block { + fn sample() -> Self { + Self { + header: DagBlockHeader::sample(), + body: BlockBody::sample(), + } + } +} + +/// `BlockInfo` is the object we store in the storage. It consists of the +/// block as well as the execution result of this block. +#[derive( + Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema, +)] +pub struct BlockInfo { + /// Block id + pub block_id: HashValue, + /// The total difficulty. + #[schemars(with = "String")] + pub total_difficulty: U256, + /// The transaction accumulator info + pub txn_accumulator_info: AccumulatorInfo, + /// The block accumulator info. 
+ pub block_accumulator_info: AccumulatorInfo, +} + +impl BlockInfo { + pub fn new( + block_id: HashValue, + total_difficulty: U256, + txn_accumulator_info: AccumulatorInfo, + block_accumulator_info: AccumulatorInfo, + ) -> Self { + Self { + block_id, + total_difficulty, + txn_accumulator_info, + block_accumulator_info, + } + } + + pub fn id(&self) -> HashValue { + self.crypto_hash() + } + + pub fn get_total_difficulty(&self) -> U256 { + self.total_difficulty + } + + pub fn get_block_accumulator_info(&self) -> &AccumulatorInfo { + &self.block_accumulator_info + } + + pub fn get_txn_accumulator_info(&self) -> &AccumulatorInfo { + &self.txn_accumulator_info + } + + pub fn block_id(&self) -> &HashValue { + &self.block_id + } +} + +impl Sample for BlockInfo { + fn sample() -> Self { + Self { + block_id: DagBlockHeader::sample().id(), + total_difficulty: 0.into(), + txn_accumulator_info: AccumulatorInfo::sample(), + block_accumulator_info: AccumulatorInfo::sample(), + } + } +} + +#[derive(Clone, Debug)] +pub struct DagBlockTemplate { + /// Parent hash. + pub parent_hash: Vec, + /// Block timestamp. + pub timestamp: u64, + /// Block author. + pub author: AccountAddress, + /// The transaction accumulator root hash after executing this block. + pub txn_accumulator_root: HashValue, + /// The block accumulator root hash. + pub block_accumulator_root: HashValue, + /// The last transaction state_root of this block after execute. + pub state_root: HashValue, + /// Gas used for contracts execution. + pub gas_used: u64, + /// hash for block body + pub body_hash: HashValue, + /// body of the block + pub body: BlockBody, + /// The chain id + pub chain_id: ChainId, + /// Block difficulty + pub difficulty: U256, + /// Block consensus strategy + pub strategy: ConsensusStrategy, +} + +impl DagBlockTemplate { + pub fn new( + parent_block_accumulator_root: HashValue, + accumulator_root: HashValue, + state_root: HashValue, + gas_used: u64, + body: BlockBody, + chain_id: ChainId, + difficulty: U256, + strategy: ConsensusStrategy, + block_metadata: DagBlockMetadata, + ) -> Self { + let (parent_hash, timestamp, author, _author_auth_key, _, _) = + block_metadata.into_inner(); + Self { + parent_hash, + block_accumulator_root: parent_block_accumulator_root, + timestamp, + author, + txn_accumulator_root: accumulator_root, + state_root, + gas_used, + body_hash: body.hash(), + body, + chain_id, + difficulty, + strategy, + } + } + + pub fn into_block(self, nonce: u32, extra: BlockHeaderExtra) -> Block { + let header = DagBlockHeader::new( + self.parent_hash, + self.timestamp, + self.author, + self.txn_accumulator_root, + self.block_accumulator_root, + self.state_root, + self.gas_used, + self.difficulty, + self.body_hash, + self.chain_id, + nonce, + extra, + ); + Block { + header, + body: self.body, + } + } + + pub fn as_raw_block_header(&self) -> RawDagBlockHeader { + RawDagBlockHeader { + parent_hash: self.parent_hash.clone(), + timestamp: self.timestamp, + author: self.author, + author_auth_key: None, + accumulator_root: self.txn_accumulator_root, + parent_block_accumulator_root: self.block_accumulator_root, + state_root: self.state_root, + gas_used: self.gas_used, + body_hash: self.body_hash, + difficulty: self.difficulty, + chain_id: self.chain_id, + } + } + + pub fn as_pow_header_blob(&self) -> Vec { + let mut blob = Vec::new(); + let raw_header = self.as_raw_block_header(); + let raw_header_hash = raw_header.crypto_hash(); + let mut dh = [0u8; 32]; + raw_header.difficulty.to_big_endian(&mut dh); + let 
extend_and_nonce = [0u8; 12]; + + blob.extend_from_slice(raw_header_hash.to_vec().as_slice()); + blob.extend_from_slice(&extend_and_nonce); + blob.extend_from_slice(&dh); + blob + } + + pub fn into_block_header(self, nonce: u32, extra: BlockHeaderExtra) -> DagBlockHeader { + DagBlockHeader::new( + self.parent_hash, + self.timestamp, + self.author, + self.txn_accumulator_root, + self.block_accumulator_root, + self.state_root, + self.gas_used, + self.difficulty, + self.body_hash, + self.chain_id, + nonce, + extra, + ) + } +} + +#[derive(Clone, Debug, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)] +pub struct ExecutedBlock { + pub block: Block, + pub block_info: BlockInfo, +} + +impl ExecutedBlock { + pub fn new(block: Block, block_info: BlockInfo) -> Self { + ExecutedBlock { block, block_info } + } + + pub fn total_difficulty(&self) -> U256 { + self.block_info.total_difficulty + } + + pub fn block(&self) -> &Block { + &self.block + } + + pub fn block_info(&self) -> &BlockInfo { + &self.block_info + } + + pub fn header(&self) -> &DagBlockHeader { + self.block.header() + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct BlockSummary { + pub block_header: DagBlockHeader, + pub uncles: Vec, +} + +impl BlockSummary { + pub fn uncles(&self) -> &[DagBlockHeader] { + &self.uncles + } + + pub fn header(&self) -> &DagBlockHeader { + &self.block_header + } +} + +impl From for BlockSummary { + fn from(block: Block) -> Self { + Self { + block_header: block.header, + uncles: block.body.uncles.unwrap_or_default(), + } + } +} + +#[allow(clippy::from_over_into)] +impl Into<(DagBlockHeader, Vec)> for BlockSummary { + fn into(self) -> (DagBlockHeader, Vec) { + (self.block_header, self.uncles) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct UncleSummary { + /// total uncle + pub uncles: u64, + /// sum(number of the block which contain uncle block - uncle parent block number). + pub sum: u64, + pub avg: u64, + pub time_sum: u64, + pub time_avg: u64, +} + +impl UncleSummary { + pub fn new(uncles: u64, sum: u64, time_sum: u64) -> Self { + let (avg, time_avg) = ( + sum.checked_div(uncles).unwrap_or_default(), + time_sum.checked_div(uncles).unwrap_or_default(), + ); + Self { + uncles, + sum, + avg, + time_sum, + time_avg, + } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct EpochUncleSummary { + /// epoch number + pub epoch: u64, + pub number_summary: UncleSummary, + pub epoch_summary: UncleSummary, +} + +impl EpochUncleSummary { + pub fn new(epoch: u64, number_summary: UncleSummary, epoch_summary: UncleSummary) -> Self { + Self { + epoch, + number_summary, + epoch_summary, + } + } +} diff --git a/vm/types/src/dag_block_metadata.rs b/vm/types/src/dag_block_metadata.rs new file mode 100644 index 0000000000..db785968c0 --- /dev/null +++ b/vm/types/src/dag_block_metadata.rs @@ -0,0 +1,146 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Copyright (c) The Diem Core Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::account_address::AccountAddress; +use crate::account_config::genesis_address; +use crate::genesis_config::ChainId; +use crate::transaction::authenticator::AuthenticationKey; +use bcs_ext::Sample; +use serde::{Deserialize, Deserializer, Serialize}; +use starcoin_crypto::hash::PlainCryptoHash; +use starcoin_crypto::{ + hash::{CryptoHash, CryptoHasher}, + HashValue, +}; + +/// Struct that will be persisted on chain to store the information of the current block. 
+/// +/// The flow will look like following: +/// 1. The executor will pass this struct to VM at the begin of a block proposal. +/// 2. The VM will use this struct to create a special system transaction that will modify the on +/// chain resource that represents the information of the current block. This transaction can't +/// be emitted by regular users and is generated by each of the miners on the fly. Such +/// transaction will be executed before all of the user-submitted transactions in the blocks. +/// 3. Once that special resource is modified, the other user transactions can read the consensus +/// info by calling into the read method of that resource, which would thus give users the +/// information such as the current block number. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, CryptoHasher, CryptoHash)] +//TODO rename to DagBlockMetadataTransaction +pub struct DagBlockMetadata { + #[serde(skip)] + id: Option, + /// Parent block hash. + parent_hash: Vec, + timestamp: u64, + author: AccountAddress, + author_auth_key: Option, + chain_id: ChainId, + parent_gas_used: u64, +} + +impl DagBlockMetadata { + pub fn new( + parent_hash: Vec, + timestamp: u64, + author: AccountAddress, + author_auth_key: Option, + chain_id: ChainId, + parent_gas_used: u64, + ) -> Self { + let mut txn = Self { + id: None, + parent_hash, + timestamp, + author, + author_auth_key, + chain_id, + parent_gas_used, + }; + txn.id = Some(txn.crypto_hash()); + txn + } + + pub fn into_inner( + self, + ) -> ( + Vec, + u64, + AccountAddress, + Option, + ChainId, + u64, + ) { + ( + self.parent_hash, + self.timestamp, + self.author, + self.author_auth_key, + self.chain_id, + self.parent_gas_used, + ) + } + + pub fn parent_hash(&self) -> Vec { + self.parent_hash.clone() + } + + pub fn timestamp(&self) -> u64 { + self.timestamp + } + + pub fn chain_id(&self) -> ChainId { + self.chain_id + } + + pub fn id(&self) -> HashValue { + self.id + .expect("DagBlockMetadata's id should been Some after init.") + } + + pub fn author(&self) -> AccountAddress { + self.author + } +} + +impl<'de> Deserialize<'de> for DagBlockMetadata { + fn deserialize(deserializer: D) -> Result>::Error> + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(rename = "DagBlockMetadata")] + struct DagBlockMetadataData { + parent_hash: Vec, + timestamp: u64, + author: AccountAddress, + author_auth_key: Option, + chain_id: ChainId, + parent_gas_used: u64, + } + let data = DagBlockMetadataData::deserialize(deserializer)?; + Ok(Self::new( + data.parent_hash, + data.timestamp, + data.author, + data.author_auth_key, + data.chain_id, + data.parent_gas_used, + )) + } +} + +impl Sample for DagBlockMetadata { + fn sample() -> Self { + Self::new( + vec![HashValue::zero()], + 0, + genesis_address(), + None, + ChainId::test(), + 0, + ) + } +} From 76a6f756569f4286037a636c58a0ffd75b9f7361 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 26 Jul 2023 17:19:54 +0800 Subject: [PATCH 06/30] merge smolstc --- Cargo.lock | 1517 ++++++----------- Cargo.toml | 12 +- chain/Cargo.toml | 10 +- chain/api/src/message.rs | 2 +- chain/service/src/chain_service.rs | 16 +- chain/src/dag_chain.rs | 86 +- consensus/dag-consensus/Cargo.toml | 15 + consensus/dag-consensus/ghostdag/Cargo.toml | 17 + consensus/dag-consensus/ghostdag/src/lib.rs | 4 + .../dag-consensus/ghostdag/src/mergeset.rs | 71 + .../dag-consensus/ghostdag/src/protocol.rs | 332 ++++ consensus/dag-consensus/ghostdag/src/util.rs | 57 + .../dag-consensus/reachability/Cargo.toml | 16 + 
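A short sketch of how the DagBlockMetadata type introduced above is constructed (the crate paths are assumptions based on the imports used elsewhere in this series; the argument values are placeholders). As with the header, new() derives the id from the other fields, and DagBlockTemplate::new later destructures the struct with into_inner(), ignoring the deprecated auth key plus the last two tuple fields (chain id and parent gas used):

    use starcoin_crypto::HashValue;
    use starcoin_vm_types::account_config::genesis_address;
    use starcoin_vm_types::dag_block_metadata::DagBlockMetadata;
    use starcoin_vm_types::genesis_config::ChainId;

    fn example_metadata(parents: Vec<HashValue>, parent_gas_used: u64) -> DagBlockMetadata {
        DagBlockMetadata::new(
            parents,           // parent hashes of the block being proposed
            1_610_110_515_000, // placeholder timestamp
            genesis_address(), // placeholder author
            None,              // author_auth_key is deprecated
            ChainId::test(),
            parent_gas_used,
        )
    }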
.../reachability/src/extensions.rs | 44 + .../reachability/src/inquirer.rs | 335 ++++ .../dag-consensus/reachability/src/lib.rs | 50 + .../reachability/src/reachability_service.rs | 316 ++++ .../dag-consensus/reachability/src/reindex.rs | 638 +++++++ .../reachability/src/relations_service.rs | 34 + .../dag-consensus/reachability/src/tests.rs | 267 +++ .../dag-consensus/reachability/src/tree.rs | 149 ++ consensus/dag-consensus/src/blockdag.rs | 235 +++ consensus/dag-consensus/src/lib.rs | 1 + network-rpc/api/Cargo.toml | 1 - network-rpc/api/src/dag_protocol.rs | 2 +- network-rpc/src/rpc.rs | 20 +- storage/dag-database/Cargo.toml | 23 + storage/dag-database/src/access.rs | 241 +++ storage/dag-database/src/cache/mod.rs | 15 + storage/dag-database/src/cache/stc_cache.rs | 45 + .../dag-database/src/consensus_ghostdag.rs | 461 +++++ storage/dag-database/src/consensus_header.rs | 168 ++ .../src/consensus_reachability.rs | 497 ++++++ .../dag-database/src/consensus_relations.rs | 273 +++ storage/dag-database/src/db.rs | 147 ++ storage/dag-database/src/errors.rs | 55 + storage/dag-database/src/item.rs | 96 ++ storage/dag-database/src/lib.rs | 30 + storage/dag-database/src/writer.rs | 68 + storage/src/lib.rs | 4 +- types/Cargo.toml | 1 - types/src/blockhash.rs | 71 + types/src/dag_block.rs | 18 +- types/src/ghostdata.rs | 146 ++ types/src/header.rs | 60 + types/src/interval.rs | 361 ++++ types/src/lib.rs | 9 + types/src/ordering.rs | 36 + types/src/perf.rs | 51 + types/src/reachability.rs | 25 + types/src/trusted.rs | 26 + vm/types/src/lib.rs | 2 +- 52 files changed, 6086 insertions(+), 1090 deletions(-) create mode 100644 consensus/dag-consensus/Cargo.toml create mode 100644 consensus/dag-consensus/ghostdag/Cargo.toml create mode 100644 consensus/dag-consensus/ghostdag/src/lib.rs create mode 100644 consensus/dag-consensus/ghostdag/src/mergeset.rs create mode 100644 consensus/dag-consensus/ghostdag/src/protocol.rs create mode 100644 consensus/dag-consensus/ghostdag/src/util.rs create mode 100644 consensus/dag-consensus/reachability/Cargo.toml create mode 100644 consensus/dag-consensus/reachability/src/extensions.rs create mode 100644 consensus/dag-consensus/reachability/src/inquirer.rs create mode 100644 consensus/dag-consensus/reachability/src/lib.rs create mode 100644 consensus/dag-consensus/reachability/src/reachability_service.rs create mode 100644 consensus/dag-consensus/reachability/src/reindex.rs create mode 100644 consensus/dag-consensus/reachability/src/relations_service.rs create mode 100644 consensus/dag-consensus/reachability/src/tests.rs create mode 100644 consensus/dag-consensus/reachability/src/tree.rs create mode 100644 consensus/dag-consensus/src/blockdag.rs create mode 100644 consensus/dag-consensus/src/lib.rs create mode 100644 storage/dag-database/Cargo.toml create mode 100644 storage/dag-database/src/access.rs create mode 100644 storage/dag-database/src/cache/mod.rs create mode 100644 storage/dag-database/src/cache/stc_cache.rs create mode 100644 storage/dag-database/src/consensus_ghostdag.rs create mode 100644 storage/dag-database/src/consensus_header.rs create mode 100644 storage/dag-database/src/consensus_reachability.rs create mode 100644 storage/dag-database/src/consensus_relations.rs create mode 100644 storage/dag-database/src/db.rs create mode 100644 storage/dag-database/src/errors.rs create mode 100644 storage/dag-database/src/item.rs create mode 100644 storage/dag-database/src/lib.rs create mode 100644 storage/dag-database/src/writer.rs create mode 100644 
types/src/blockhash.rs create mode 100644 types/src/ghostdata.rs create mode 100644 types/src/header.rs create mode 100644 types/src/interval.rs create mode 100644 types/src/ordering.rs create mode 100644 types/src/perf.rs create mode 100644 types/src/reachability.rs create mode 100644 types/src/trusted.rs diff --git a/Cargo.lock b/Cargo.lock index db04cd6a0f..a3fc3be768 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -207,7 +207,7 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "csv", "elasticsearch", @@ -219,10 +219,10 @@ dependencies = [ "serde 1.0.152", "serde_json", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-rpc-api", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "tokio", ] @@ -656,16 +656,6 @@ dependencies = [ "serde 1.0.152", ] -[[package]] -name = "bcs-ext" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "bcs", - "serde 1.0.152", -] - [[package]] name = "bech32" version = "0.9.1" @@ -684,33 +674,33 @@ version = "1.13.5" dependencies = [ "anyhow", "criterion", - "forkable-jellyfish-merkle 1.13.5", + "forkable-jellyfish-merkle", "futures 0.3.26", "futures-timer", - "network-api 1.13.5", + "network-api", "parking_lot 0.12.1", "pprof", "proptest", "rand 0.8.5", "rand_core 0.6.4", - "starcoin-account-api 1.13.5", - "starcoin-accumulator 1.13.5", + "starcoin-account-api", + "starcoin-accumulator", "starcoin-chain", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-executor-benchmark", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-service-registry 1.13.5", - "starcoin-state-store-api 1.13.5", + "starcoin-logger", + "starcoin-service-registry", + "starcoin-state-store-api", "starcoin-state-tree", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-transaction-builder", - "starcoin-types 1.13.5", + "starcoin-types", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", ] [[package]] @@ -1360,32 +1350,6 @@ dependencies = [ "yaml-rust", ] -[[package]] -name = "consensus" -version = "0.1.0" -source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" -dependencies = [ - "anyhow", - "consensus-types", - "database", - "ghostdag", - "parking_lot 0.12.1", - "reachability", - "starcoin-crypto", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", -] - -[[package]] -name = "consensus-types" -version = "0.1.0" -source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" -dependencies = [ - "itertools", - "serde 1.0.152", - "starcoin-crypto", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", -] - [[package]] name = "console" version = "0.15.5" @@ -1415,7 +1379,7 @@ name = "contrib-contracts" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "ethereum-types", "hex", "rlp", @@ -1425,9 +1389,9 @@ dependencies = [ "starcoin-crypto", "starcoin-executor", "starcoin-state-api", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-types", + 
"starcoin-vm-types", + "stdlib", "stest", "tempfile", "test-helper", @@ -1880,6 +1844,40 @@ dependencies = [ "syn 1.0.107", ] +[[package]] +name = "dag-consensus" +version = "1.13.5" +dependencies = [ + "anyhow", + "dag-database", + "ghostdag", + "parking_lot 0.12.1", + "reachability", + "starcoin-crypto", + "starcoin-types", +] + +[[package]] +name = "dag-database" +version = "1.13.5" +dependencies = [ + "bincode", + "faster-hex", + "indexmap", + "itertools", + "num_cpus", + "parking_lot 0.12.1", + "rand 0.8.5", + "rocksdb", + "serde 1.0.152", + "starcoin-config", + "starcoin-crypto", + "starcoin-storage", + "starcoin-types", + "tempfile", + "thiserror", +] + [[package]] name = "darling" version = "0.9.0" @@ -2079,28 +2077,6 @@ dependencies = [ "syn 1.0.107", ] -[[package]] -name = "database" -version = "0.1.0" -source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" -dependencies = [ - "bincode", - "consensus-types", - "faster-hex", - "indexmap", - "itertools", - "num_cpus", - "parking_lot 0.12.1", - "rand 0.8.5", - "rocksdb", - "serde 1.0.152", - "starcoin-config 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-crypto", - "starcoin-storage 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "thiserror", -] - [[package]] name = "datatest-stable" version = "0.1.1" @@ -2129,7 +2105,7 @@ version = "1.13.5" dependencies = [ "anyhow", "atomic-counter", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "csv", "futures 0.3.26", @@ -2141,22 +2117,22 @@ dependencies = [ "rayon", "serde 1.0.152", "serde_json", - "starcoin-account-api 1.13.5", - "starcoin-accumulator 1.13.5", + "starcoin-account-api", + "starcoin-accumulator", "starcoin-chain", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-resource-viewer", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-transaction-builder", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "tokio", ] @@ -2846,7 +2822,7 @@ dependencies = [ "anyhow", "backtrace", "bcs", - "bcs-ext 1.13.5", + "bcs-ext", "byteorder", "criterion", "hex", @@ -2860,27 +2836,7 @@ dependencies = [ "serde 1.0.152", "serde_bytes", "starcoin-crypto", - "starcoin-logger 1.13.5", - "thiserror", -] - -[[package]] -name = "forkable-jellyfish-merkle" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "backtrace", - "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "byteorder", - "hex", - "mirai-annotations", - "num-derive", - "num-traits 0.2.15", - "serde 1.0.152", - "serde_bytes", - "starcoin-crypto", - "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-logger", "thiserror", ] @@ -3118,7 +3074,7 @@ name = "genesis-nft-miner" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "hex", "jsonrpc-core-client", @@ -3127,8 +3083,8 
@@ dependencies = [ "serde_json", "starcoin-crypto", "starcoin-rpc-api", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "tokio", ] @@ -3188,17 +3144,16 @@ dependencies = [ [[package]] name = "ghostdag" -version = "0.1.0" -source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" +version = "1.13.5" dependencies = [ - "consensus-types", - "database", + "dag-database", "itertools", "parking_lot 0.12.1", "reachability", "rocksdb", "serde 1.0.152", "starcoin-crypto", + "starcoin-types", "thiserror", ] @@ -4914,7 +4869,7 @@ name = "merkle-generator" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "csv", "hex", @@ -4923,7 +4878,7 @@ dependencies = [ "serde_json", "sha3", "starcoin-crypto", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", ] [[package]] @@ -5487,7 +5442,7 @@ version = "1.13.5" dependencies = [ "anyhow", "bcs", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "datatest-stable 0.1.1", "difference", @@ -5512,17 +5467,17 @@ dependencies = [ "once_cell", "starcoin-account-provider", "starcoin-cmd", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-move-compiler 1.13.5", + "starcoin-logger", + "starcoin-move-compiler", "starcoin-rpc-api", "starcoin-rpc-client", "starcoin-transactional-test-harness", - "starcoin-types 1.13.5", + "starcoin-types", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-vm-types", + "stdlib", "tempfile", "tokio", "vm-status-translator", @@ -6035,45 +5990,21 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", - "futures 0.3.26", - "hex", - "itertools", - "network-p2p-types 1.13.5", - "network-types 1.13.5", - "parking_lot 0.12.1", - "rand 0.8.5", - "schemars", - "serde 1.0.152", - "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", - "starcoin-service-registry 1.13.5", - "starcoin-types 1.13.5", -] - -[[package]] -name = "network-api" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "async-trait", - "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "bcs-ext", "futures 0.3.26", "hex", "itertools", - "network-p2p-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "network-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "network-p2p-types", + "network-types", "parking_lot 0.12.1", "rand 0.8.5", "schemars", "serde 1.0.152", "starcoin-crypto", - "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-metrics 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-service-registry 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-logger", + "starcoin-metrics", + "starcoin-service-registry", + "starcoin-types", ] [[package]] @@ -6084,7 +6015,7 @@ dependencies = [ "async-std", "async-trait", "asynchronous-codec 0.5.0", - 
"bcs-ext 1.13.5", + "bcs-ext", "bitflags", "bs58 0.3.1", "bytes 1.4.0", @@ -6100,20 +6031,20 @@ dependencies = [ "linked_hash_set", "log 0.4.17", "lru 0.7.8", - "network-p2p-types 1.13.5", + "network-p2p-types", "once_cell", "parking_lot 0.12.1", "pin-project 0.4.30", "prometheus", "rand 0.8.5", - "sc-peerset 1.13.5", + "sc-peerset", "serde 1.0.152", "serde_json", "smallvec 1.10.0", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-metrics 1.13.5", - "starcoin-types 1.13.5", + "starcoin-metrics", + "starcoin-types", "stest", "tempfile", "thiserror", @@ -6129,14 +6060,14 @@ name = "network-p2p-core" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "log 0.4.17", "network-p2p-derive", - "network-p2p-types 1.13.5", + "network-p2p-types", "num_enum", "serde 1.0.152", - "starcoin-types 1.13.5", + "starcoin-types", "stest", ] @@ -6160,31 +6091,12 @@ dependencies = [ "derive_more", "libp2p", "rand 0.8.5", - "sc-peerset 1.13.5", - "schemars", - "serde 1.0.152", - "serde_json", - "starcoin-crypto", - "starcoin-types 1.13.5", -] - -[[package]] -name = "network-p2p-types" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "bitflags", - "bytes 1.4.0", - "derive_more", - "libp2p", - "rand 0.8.5", - "sc-peerset 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "sc-peerset", "schemars", "serde 1.0.152", "serde_json", "starcoin-crypto", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-types", ] [[package]] @@ -6192,24 +6104,11 @@ name = "network-types" version = "1.13.5" dependencies = [ "anyhow", - "network-p2p-types 1.13.5", + "network-p2p-types", "schemars", "serde 1.0.152", "starcoin-crypto", - "starcoin-types 1.13.5", -] - -[[package]] -name = "network-types" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "network-p2p-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "schemars", - "serde 1.0.152", - "starcoin-crypto", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-types", ] [[package]] @@ -7910,17 +7809,16 @@ dependencies = [ [[package]] name = "reachability" -version = "0.1.0" -source = "git+https://github.com/starcoinorg/smolstc?rev=167d700a0f99ba929cd6d156dac77859306f32da#167d700a0f99ba929cd6d156dac77859306f32da" +version = "1.13.5" dependencies = [ - "consensus-types", - "database", + "dag-database", "itertools", "parking_lot 0.12.1", "rocksdb", "serde 1.0.152", "starcoin-crypto", - "starcoin-storage 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-storage", + "starcoin-types", "thiserror", ] @@ -8088,7 +7986,7 @@ name = "resource-exporter" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "csv", "hex", @@ -8099,9 +7997,9 @@ dependencies = [ "starcoin-resource-viewer", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-storage", + "starcoin-types", + "starcoin-vm-types", ] 
[[package]] @@ -8476,20 +8374,7 @@ dependencies = [ "log 0.4.17", "rand 0.8.5", "serde_json", - "sp-utils 1.13.5", - "wasm-timer", -] - -[[package]] -name = "sc-peerset" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "futures 0.3.26", - "libp2p", - "log 0.4.17", - "serde_json", - "sp-utils 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "sp-utils", "wasm-timer", ] @@ -8683,7 +8568,7 @@ dependencies = [ name = "serde-helpers" version = "1.13.5" dependencies = [ - "bcs-ext 1.13.5", + "bcs-ext", "hex", "serde 1.0.152", "serde_bytes", @@ -9150,20 +9035,6 @@ dependencies = [ "simple-stopwatch", ] -[[package]] -name = "sp-utils" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "futures 0.3.26", - "futures-core", - "lazy_static 1.4.0", - "once_cell", - "parking_lot 0.12.1", - "prometheus", - "simple-stopwatch", -] - [[package]] name = "spin" version = "0.5.2" @@ -9192,7 +9063,7 @@ version = "1.13.5" dependencies = [ "anyhow", "bcs", - "bcs-ext 1.13.5", + "bcs-ext", "hex", "move-binary-format", "ordinal", @@ -9203,7 +9074,7 @@ dependencies = [ "starcoin-abi-resolver", "starcoin-abi-types", "starcoin-resource-viewer", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", ] [[package]] @@ -9215,8 +9086,8 @@ dependencies = [ "serde_json", "starcoin-abi-types", "starcoin-resource-viewer", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-vm-types", + "stdlib", "test-helper", ] @@ -9231,7 +9102,7 @@ dependencies = [ "serde 1.0.152", "serde_bytes", "serde_json", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", ] [[package]] @@ -9240,7 +9111,7 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "hex", "parking_lot 0.12.1", @@ -9248,13 +9119,13 @@ dependencies = [ "rand_core 0.6.4", "serde 1.0.152", "serde_json", - "starcoin-account-api 1.13.5", - "starcoin-config 1.13.5", + "starcoin-account-api", + "starcoin-config", "starcoin-crypto", "starcoin-decrypt", - "starcoin-logger 1.13.5", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", + "starcoin-logger", + "starcoin-storage", + "starcoin-types", "tempfile", ] @@ -9264,29 +9135,7 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", - "futures 0.3.26", - "hex", - "rand 0.8.5", - "rand_core 0.6.4", - "schemars", - "serde 1.0.152", - "serde_bytes", - "serde_json", - "starcoin-crypto", - "starcoin-service-registry 1.13.5", - "starcoin-types 1.13.5", - "thiserror", -] - -[[package]] -name = "starcoin-account-api" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "async-trait", - "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "bcs-ext", "futures 0.3.26", "hex", "rand 0.8.5", @@ -9296,8 +9145,8 @@ dependencies = [ "serde_bytes", "serde_json", "starcoin-crypto", - "starcoin-service-registry 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + 
"starcoin-service-registry", + "starcoin-types", "thiserror", ] @@ -9307,11 +9156,11 @@ version = "1.13.5" dependencies = [ "anyhow", "starcoin-account", - "starcoin-account-api 1.13.5", - "starcoin-config 1.13.5", + "starcoin-account-api", + "starcoin-config", "starcoin-crypto", "starcoin-rpc-client", - "starcoin-types 1.13.5", + "starcoin-types", ] [[package]] @@ -9320,16 +9169,16 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "starcoin-account", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-chain-notify", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-service-registry 1.13.5", - "starcoin-types 1.13.5", + "starcoin-logger", + "starcoin-service-registry", + "starcoin-types", "stest", "tempfile", "tokio", @@ -9340,7 +9189,7 @@ name = "starcoin-accumulator" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "byteorder", "itertools", "lru 0.7.8", @@ -9354,26 +9203,7 @@ dependencies = [ "schemars", "serde 1.0.152", "starcoin-crypto", - "starcoin-logger 1.13.5", -] - -[[package]] -name = "starcoin-accumulator" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "byteorder", - "itertools", - "lru 0.7.8", - "mirai-annotations", - "once_cell", - "parking_lot 0.12.1", - "schemars", - "serde 1.0.152", - "starcoin-crypto", - "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-logger", ] [[package]] @@ -9382,25 +9212,25 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "hex", - "network-api 1.13.5", + "network-api", "starcoin-chain", "starcoin-chain-api", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-logger", + "starcoin-metrics", "starcoin-network", "starcoin-network-rpc-api", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-sync", "starcoin-sync-api", - "starcoin-time-service 1.13.5", + "starcoin-time-service", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types 1.13.5", + "starcoin-types", "stest", "tokio", ] @@ -9410,37 +9240,36 @@ name = "starcoin-chain" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", - "consensus", - "consensus-types", - "database", + "dag-consensus", + "dag-database", "proptest", "proptest-derive", "rand 0.8.5", "rand_core 0.6.4", - "sp-utils 1.13.5", - "starcoin-account-api 1.13.5", - "starcoin-accumulator 1.13.5", + "sp-utils", + "starcoin-account-api", + "starcoin-accumulator", "starcoin-chain-api", "starcoin-chain-mock", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-open-block", "starcoin-resource-viewer", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage 1.13.5", - "starcoin-time-service 1.13.5", + "starcoin-storage", + "starcoin-time-service", "starcoin-transaction-builder", - "starcoin-types 1.13.5", - 
"starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-types", + "starcoin-vm-types", + "stdlib", "stest", "test-helper", "thiserror", @@ -9454,18 +9283,18 @@ dependencies = [ "anyhow", "async-trait", "futures 0.3.26", - "network-api 1.13.5", + "network-api", "rand 0.8.5", "rand_core 0.6.4", "serde 1.0.152", - "starcoin-accumulator 1.13.5", + "starcoin-accumulator", "starcoin-crypto", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-statedb", - "starcoin-time-service 1.13.5", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-time-service", + "starcoin-types", + "starcoin-vm-types", "thiserror", ] @@ -9475,27 +9304,27 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "futures-timer", "parking_lot 0.12.1", "proptest", "proptest-derive", - "starcoin-account-api 1.13.5", - "starcoin-accumulator 1.13.5", + "starcoin-account-api", + "starcoin-accumulator", "starcoin-chain", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-open-block", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-storage", + "starcoin-types", + "starcoin-vm-types", "thiserror", ] @@ -9505,10 +9334,10 @@ version = "1.13.5" dependencies = [ "anyhow", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-service-registry 1.13.5", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", + "starcoin-logger", + "starcoin-service-registry", + "starcoin-storage", + "starcoin-types", ] [[package]] @@ -9523,15 +9352,15 @@ dependencies = [ "serde 1.0.152", "starcoin-chain", "starcoin-chain-api", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-service-registry 1.13.5", + "starcoin-logger", + "starcoin-service-registry", "starcoin-state-api", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", + "starcoin-storage", + "starcoin-types", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", "stest", "test-helper", "thiserror", @@ -9543,18 +9372,18 @@ name = "starcoin-cmd" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "crossbeam-channel", - "forkable-jellyfish-merkle 1.13.5", + "forkable-jellyfish-merkle", "futures 0.3.26", "hex", "itertools", "move-command-line-common", "move-errmapgen", - "network-api 1.13.5", - "network-p2p-types 1.13.5", - "network-types 1.13.5", + "network-api", + "network-p2p-types", + "network-types", "rand 0.8.5", "scmd", "serde 1.0.152", @@ -9562,17 +9391,17 @@ dependencies = [ "starcoin-abi-decoder", "starcoin-abi-resolver", "starcoin-abi-types", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-account-provider", "starcoin-chain-api", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-move-compiler 1.13.5", + "starcoin-logger", + "starcoin-move-compiler", "starcoin-move-explain", "starcoin-network-rpc-api", "starcoin-node", @@ -9580,15 +9409,15 @@ dependencies = [ "starcoin-resource-viewer", "starcoin-rpc-api", "starcoin-rpc-client", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-sync-api", 
"starcoin-transaction-builder", "starcoin-txpool-api", - "starcoin-types 1.13.5", + "starcoin-types", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-vm-types", + "stdlib", "stest", "test-helper", "tokio", @@ -9606,9 +9435,9 @@ dependencies = [ "hex", "libc", "names", - "network-api 1.13.5", - "network-p2p-types 1.13.5", - "network-types 1.13.5", + "network-api", + "network-p2p-types", + "network-types", "num_cpus", "num_enum", "once_cell", @@ -9618,59 +9447,18 @@ dependencies = [ "schemars", "serde 1.0.152", "serde_json", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-crypto", - "starcoin-gas 1.13.5", - "starcoin-gas-algebra-ext 1.13.5", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", - "starcoin-system 1.13.5", - "starcoin-time-service 1.13.5", - "starcoin-types 1.13.5", - "starcoin-uint 1.13.5", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", - "tempfile", - "thiserror", - "toml", -] - -[[package]] -name = "starcoin-config" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "clap 3.2.23", - "dirs-next", - "git-version", - "hex", - "libc", - "names", - "network-api 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "network-p2p-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "network-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "num_cpus", - "num_enum", - "once_cell", - "parking_lot 0.12.1", - "rand 0.8.5", - "rand_core 0.6.4", - "schemars", - "serde 1.0.152", - "serde_json", - "starcoin-account-api 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-crypto", - "starcoin-gas 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-gas-algebra-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-metrics 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-system 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-time-service 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-uint 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "stdlib 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-gas", + "starcoin-gas-algebra-ext", + "starcoin-logger", + "starcoin-metrics", + "starcoin-system", + "starcoin-time-service", + "starcoin-types", + "starcoin-uint", + "starcoin-vm-types", + "stdlib", "tempfile", "thiserror", "toml", @@ -9694,11 +9482,11 @@ dependencies = [ "sha3", "starcoin-chain-api", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-state-api", - 
"starcoin-time-service 1.13.5", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-time-service", + "starcoin-types", + "starcoin-vm-types", "stest", "thiserror", ] @@ -9737,13 +9525,13 @@ name = "starcoin-dataformat-generator" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "serde 1.0.152", "serde-reflection 0.3.2", "serde_yaml", "starcoin-crypto", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", ] [[package]] @@ -9765,18 +9553,18 @@ name = "starcoin-dev" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "starcoin-abi-decoder", "starcoin-abi-resolver", "starcoin-abi-types", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-resource-viewer", "starcoin-rpc-api", "starcoin-state-api", "starcoin-statedb", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", "thiserror", "vm-status-translator", ] @@ -9786,7 +9574,7 @@ name = "starcoin-executor" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "hex", "log 0.4.17", "move-transactional-test-runner", @@ -9795,20 +9583,20 @@ dependencies = [ "serde_json", "starcoin-abi-resolver", "starcoin-abi-types", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-resource-viewer", "starcoin-state-api", "starcoin-state-tree", "starcoin-statedb", "starcoin-transaction-builder", - "starcoin-types 1.13.5", + "starcoin-types", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-vm-types", + "stdlib", "stest", "tempfile", "test-helper", @@ -9823,19 +9611,19 @@ dependencies = [ "itertools", "rand 0.8.5", "rayon", - "starcoin-accumulator 1.13.5", - "starcoin-config 1.13.5", + "starcoin-accumulator", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-transaction-builder", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", ] [[package]] @@ -9852,15 +9640,15 @@ dependencies = [ "rust-embed", "serde 1.0.152", "serde_json", - "starcoin-account-api 1.13.5", - "starcoin-config 1.13.5", + "starcoin-account-api", + "starcoin-config", "starcoin-crypto", "starcoin-executor", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-rpc-client", "starcoin-state-api", "starcoin-transaction-builder", - "starcoin-types 1.13.5", + "starcoin-types", "tiny_http", "tokio", "tokio-executor 0.2.0-alpha.6", @@ -9889,25 +9677,9 @@ dependencies = [ "move-stdlib", "move-table-extension", "move-vm-types", - "starcoin-gas-algebra-ext 1.13.5", - "starcoin-logger 1.13.5", - "starcoin-natives 1.13.5", -] - -[[package]] -name = "starcoin-gas" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "clap 3.2.23", - "move-binary-format", - "move-core-types", - "move-stdlib", - "move-table-extension", - "move-vm-types", - "starcoin-gas-algebra-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - 
"starcoin-natives 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-gas-algebra-ext", + "starcoin-logger", + "starcoin-natives", ] [[package]] @@ -9921,22 +9693,7 @@ dependencies = [ "move-vm-test-utils", "move-vm-types", "serde 1.0.152", - "starcoin-natives 1.13.5", -] - -[[package]] -name = "starcoin-gas-algebra-ext" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "move-binary-format", - "move-core-types", - "move-stdlib", - "move-table-extension", - "move-vm-test-utils", - "move-vm-types", - "serde 1.0.152", - "starcoin-natives 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-natives", ] [[package]] @@ -9952,15 +9709,15 @@ dependencies = [ "serde 1.0.152", "serde_json", "starcoin-account", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-chain", "starcoin-chain-mock", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", + "starcoin-logger", + "starcoin-storage", + "starcoin-types", ] [[package]] @@ -9968,26 +9725,26 @@ name = "starcoin-genesis" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "include_dir", "once_cell", "serde 1.0.152", - "starcoin-accumulator 1.13.5", + "starcoin-accumulator", "starcoin-chain", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-transaction-builder", - "starcoin-types 1.13.5", + "starcoin-types", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-vm-types", + "stdlib", "stest", "thiserror", ] @@ -10006,9 +9763,9 @@ dependencies = [ "serde 1.0.152", "serde_json", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-rpc-api", - "starcoin-types 1.13.5", + "starcoin-types", "tokio", ] @@ -10031,26 +9788,6 @@ dependencies = [ "slog-term", ] -[[package]] -name = "starcoin-logger" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "arc-swap", - "chrono", - "lazy_static 1.4.0", - "log 0.4.17", - "log4rs", - "once_cell", - "parking_lot 0.12.1", - "schemars", - "serde 1.0.152", - "slog", - "slog-async", - "slog-term", -] - [[package]] name = "starcoin-metrics" version = "1.13.5" @@ -10061,21 +9798,8 @@ dependencies = [ "prometheus", "psutil", "serde_json", - "starcoin-logger 1.13.5", - "timeout-join-handler 1.13.5", -] - -[[package]] -name = "starcoin-metrics" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "prometheus", - "psutil", - "serde_json", - "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "timeout-join-handler 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-logger", + "timeout-join-handler", ] [[package]] @@ -10083,39 +9807,39 @@ name = 
"starcoin-miner" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "futures-timer", "hex", "once_cell", "parking_lot 0.12.1", "serde 1.0.152", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-account-service", - "starcoin-accumulator 1.13.5", + "starcoin-accumulator", "starcoin-chain", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-logger", + "starcoin-metrics", "starcoin-network-rpc", "starcoin-network-rpc-api", "starcoin-node", "starcoin-open-block", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-service", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-sync", "starcoin-sync-api", - "starcoin-time-service 1.13.5", + "starcoin-time-service", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "stest", "test-helper", "thiserror", @@ -10148,18 +9872,18 @@ dependencies = [ "rust-argon2", "serde 1.0.152", "serde_json", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-miner", "starcoin-miner-client-api", "starcoin-rpc-api", "starcoin-rpc-client", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-stratum", - "starcoin-time-service 1.13.5", - "starcoin-types 1.13.5", + "starcoin-time-service", + "starcoin-types", "stest", "thiserror", "tokio", @@ -10173,7 +9897,7 @@ dependencies = [ "async-trait", "dyn-clone", "futures 0.3.26", - "starcoin-types 1.13.5", + "starcoin-types", ] [[package]] @@ -10188,40 +9912,21 @@ dependencies = [ "petgraph 0.5.1", "regex", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-logger", + "starcoin-vm-types", "stest", "tempfile", "walkdir", ] -[[package]] -name = "starcoin-move-compiler" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "move-binary-format", - "move-command-line-common", - "move-compiler", - "once_cell", - "petgraph 0.5.1", - "regex", - "starcoin-crypto", - "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "tempfile", - "walkdir", -] - [[package]] name = "starcoin-move-explain" version = "1.13.5" dependencies = [ - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "move-core-types", - "stdlib 1.13.5", + "stdlib", ] [[package]] @@ -10274,33 +9979,7 @@ dependencies = [ "ripemd160", "smallvec 1.10.0", "starcoin-crypto", - "starcoin-uint 1.13.5", - "tiny-keccak", - "walkdir", -] - -[[package]] -name = "starcoin-natives" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "arrayref", - "libsecp256k1", - "log 0.4.17", - "move-binary-format", - "move-command-line-common", - "move-core-types", - "move-docgen", - "move-errmapgen", - "move-prover", - "move-stdlib", - "move-vm-runtime", - "move-vm-types", - "num_enum", - "ripemd160", - "smallvec 1.10.0", - 
"starcoin-crypto", - "starcoin-uint 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-uint", "tiny-keccak", "walkdir", ] @@ -10312,7 +9991,7 @@ dependencies = [ "anyhow", "async-std", "async-trait", - "bcs-ext 1.13.5", + "bcs-ext", "bitflags", "bytes 1.4.0", "derive_more", @@ -10322,26 +10001,26 @@ dependencies = [ "hex", "log 0.4.17", "lru 0.7.8", - "network-api 1.13.5", + "network-api", "network-p2p", "network-p2p-core", - "network-p2p-types 1.13.5", + "network-p2p-types", "parking_lot 0.12.1", "prometheus", "rand 0.8.5", - "sc-peerset 1.13.5", + "sc-peerset", "serde 1.0.152", "serde_json", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-logger", + "starcoin-metrics", "starcoin-network-rpc", "starcoin-network-rpc-api", - "starcoin-service-registry 1.13.5", - "starcoin-storage 1.13.5", + "starcoin-service-registry", + "starcoin-storage", "starcoin-txpool-api", - "starcoin-types 1.13.5", + "starcoin-types", "stest", "tempfile", "test-helper", @@ -10355,41 +10034,41 @@ version = "1.13.5" dependencies = [ "anyhow", "api-limiter", - "bcs-ext 1.13.5", + "bcs-ext", "bytes 1.4.0", "futures 0.3.26", "futures-timer", "hex", - "network-api 1.13.5", + "network-api", "network-p2p-core", "network-p2p-derive", - "network-p2p-types 1.13.5", + "network-p2p-types", "once_cell", "prometheus", "serde 1.0.152", "serde_json", - "starcoin-account-api 1.13.5", - "starcoin-accumulator 1.13.5", + "starcoin-account-api", + "starcoin-accumulator", "starcoin-block-relayer", "starcoin-chain-service", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-miner", "starcoin-network", "starcoin-network-rpc-api", "starcoin-node", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-service", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "stest", "test-helper", "tokio", @@ -10400,22 +10079,21 @@ name = "starcoin-network-rpc-api" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", - "consensus-types", + "bcs-ext", "futures 0.3.26", "network-p2p-core", "network-p2p-derive", - "network-p2p-types 1.13.5", - "network-types 1.13.5", + "network-p2p-types", + "network-types", "once_cell", "serde 1.0.152", - "starcoin-accumulator 1.13.5", + "starcoin-accumulator", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-state-api", "starcoin-state-tree", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", ] [[package]] @@ -10431,22 +10109,22 @@ dependencies = [ "chrono", "futures 0.3.26", "futures-timer", - "network-api 1.13.5", + "network-api", "network-p2p-core", "serde_json", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-account-service", "starcoin-block-relayer", "starcoin-chain-notify", "starcoin-chain-service", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-logger", + "starcoin-metrics", "starcoin-miner", "starcoin-miner-client", "starcoin-network", @@ -10455,21 +10133,21 @@ 
dependencies = [ "starcoin-node-api", "starcoin-rpc-client", "starcoin-rpc-server", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-service", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-stratum", "starcoin-sync", "starcoin-sync-api", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types 1.13.5", + "starcoin-types", "starcoin-vm-runtime", "stest", "thiserror", - "timeout-join-handler 1.13.5", + "timeout-join-handler", "tokio", ] @@ -10482,14 +10160,14 @@ dependencies = [ "backtrace", "futures 0.3.26", "serde 1.0.152", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-service-registry 1.13.5", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", + "starcoin-logger", + "starcoin-service-registry", + "starcoin-storage", + "starcoin-types", "stest", "thiserror", ] @@ -10500,19 +10178,19 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "parking_lot 0.12.1", - "starcoin-accumulator 1.13.5", + "starcoin-accumulator", "starcoin-chain-api", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-state-api", "starcoin-statedb", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", + "starcoin-storage", + "starcoin-types", "stest", ] @@ -10522,19 +10200,19 @@ version = "1.13.5" dependencies = [ "anyhow", "async-std", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "futures 0.3.26", "network-p2p", - "network-p2p-types 1.13.5", - "network-types 1.13.5", - "starcoin-config 1.13.5", + "network-p2p-types", + "network-types", + "starcoin-config", "starcoin-crypto", "starcoin-genesis", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-network", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", + "starcoin-storage", + "starcoin-types", ] [[package]] @@ -10543,14 +10221,14 @@ version = "1.13.5" dependencies = [ "anyhow", "clap 3.2.23", - "sp-utils 1.13.5", + "sp-utils", "starcoin-chain", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-logger", + "starcoin-storage", + "starcoin-types", + "starcoin-vm-types", ] [[package]] @@ -10563,7 +10241,7 @@ dependencies = [ "move-core-types", "serde 1.0.152", "serde_json", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", ] [[package]] @@ -10572,7 +10250,7 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "futures 0.3.26", "hex", @@ -10582,9 +10260,9 @@ dependencies = [ "jsonrpc-pubsub", "jsonrpc-server-utils", "move-core-types", - "network-api 1.13.5", - "network-p2p-types 1.13.5", - "network-types 1.13.5", + "network-api", + "network-p2p-types", + "network-types", "openrpc-derive", "openrpc-schema", "schemars", @@ -10593,19 +10271,19 @@ dependencies = [ "serde_json", "starcoin-abi-decoder", "starcoin-abi-types", - "starcoin-account-api 1.13.5", - "starcoin-accumulator 1.13.5", + "starcoin-account-api", + "starcoin-accumulator", "starcoin-chain-api", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-resource-viewer", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-sync-api", 
"starcoin-txpool-api", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "thiserror", "vm-status-translator", ] @@ -10618,7 +10296,7 @@ dependencies = [ "actix-rt", "anyhow", "async-std", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "futures-timer", "hex", @@ -10629,27 +10307,27 @@ dependencies = [ "jsonrpc-pubsub", "jsonrpc-server-utils", "log 0.4.17", - "network-api 1.13.5", - "network-p2p-types 1.13.5", - "network-types 1.13.5", + "network-api", + "network-p2p-types", + "network-types", "parity-tokio-ipc", "parking_lot 0.12.1", "serde 1.0.152", "serde_json", "starcoin-abi-types", - "starcoin-account-api 1.13.5", - "starcoin-config 1.13.5", + "starcoin-account-api", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-rpc-api", "starcoin-rpc-server", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-tree", "starcoin-sync-api", "starcoin-txpool-api", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "stest", "test-helper", "thiserror", @@ -10666,9 +10344,9 @@ dependencies = [ "once_cell", "rand 0.8.5", "serde_json", - "starcoin-config 1.13.5", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-config", + "starcoin-logger", + "starcoin-metrics", "starcoin-rpc-api", "stest", "thiserror", @@ -10683,7 +10361,7 @@ dependencies = [ "anyhow", "api-limiter", "bcs", - "bcs-ext 1.13.5", + "bcs-ext", "dashmap", "futures 0.3.26", "futures-channel", @@ -10699,30 +10377,30 @@ dependencies = [ "jsonrpc-tcp-server", "jsonrpc-ws-server", "log 0.4.17", - "network-api 1.13.5", + "network-api", "network-p2p-core", - "network-p2p-types 1.13.5", - "network-types 1.13.5", + "network-p2p-types", + "network-types", "parking_lot 0.12.1", "serde 1.0.152", "serde_json", "starcoin-abi-decoder", "starcoin-abi-resolver", "starcoin-abi-types", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-account-service", "starcoin-chain", "starcoin-chain-mock", "starcoin-chain-notify", "starcoin-chain-service", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-logger", + "starcoin-metrics", "starcoin-miner", "starcoin-network", "starcoin-node-api", @@ -10730,19 +10408,19 @@ dependencies = [ "starcoin-rpc-api", "starcoin-rpc-client", "starcoin-rpc-middleware", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-service", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-sync-api", "starcoin-transaction-builder", "starcoin-txpool", "starcoin-txpool-api", "starcoin-txpool-mock-service", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "stest", "test-helper", "thiserror", @@ -10770,40 +10448,21 @@ dependencies = [ "tokio", ] -[[package]] -name = "starcoin-service-registry" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "actix", - "actix-rt", - "anyhow", - "async-trait", - "futures 0.3.26", - "futures-timer", - "log 0.4.17", - "once_cell", - "schemars", - "serde 1.0.152", - "thiserror", - "tokio", -] - [[package]] name = "starcoin-state-api" 
version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "bcs-ext 1.13.5", - "forkable-jellyfish-merkle 1.13.5", + "bcs-ext", + "forkable-jellyfish-merkle", "once_cell", "serde 1.0.152", "starcoin-crypto", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-tree", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", ] [[package]] @@ -10813,16 +10472,16 @@ dependencies = [ "anyhow", "async-trait", "futures 0.3.26", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-service-registry 1.13.5", + "starcoin-logger", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-storage", + "starcoin-types", + "starcoin-vm-types", "stest", "test-helper", "tokio", @@ -10833,18 +10492,7 @@ name = "starcoin-state-store-api" version = "1.13.5" dependencies = [ "anyhow", - "forkable-jellyfish-merkle 1.13.5", - "serde 1.0.152", - "starcoin-crypto", -] - -[[package]] -name = "starcoin-state-store-api" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "forkable-jellyfish-merkle 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "forkable-jellyfish-merkle", "serde 1.0.152", "starcoin-crypto", ] @@ -10854,17 +10502,17 @@ name = "starcoin-state-tree" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", - "forkable-jellyfish-merkle 1.13.5", + "bcs-ext", + "forkable-jellyfish-merkle", "parking_lot 0.12.1", "serde 1.0.152", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-state-store-api 1.13.5", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-logger", + "starcoin-state-store-api", + "starcoin-storage", + "starcoin-types", + "starcoin-vm-types", ] [[package]] @@ -10872,17 +10520,17 @@ name = "starcoin-statedb" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", - "forkable-jellyfish-merkle 1.13.5", + "bcs-ext", + "forkable-jellyfish-merkle", "lru 0.7.8", "parking_lot 0.12.1", "serde 1.0.152", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-state-api", "starcoin-state-tree", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "thiserror", ] @@ -10891,14 +10539,14 @@ name = "starcoin-storage" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "byteorder", "chrono", "coarsetime", - "forkable-jellyfish-merkle 1.13.5", + "forkable-jellyfish-merkle", "lru 0.7.8", - "network-p2p-types 1.13.5", - "network-types 1.13.5", + "network-p2p-types", + "network-types", "num_enum", "once_cell", "parking_lot 0.12.1", @@ -10907,50 +10555,19 @@ dependencies = [ "rand 0.8.5", "rocksdb", "serde 1.0.152", - "starcoin-accumulator 1.13.5", - "starcoin-config 1.13.5", + "starcoin-accumulator", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", - "starcoin-state-store-api 1.13.5", - "starcoin-types 1.13.5", - "starcoin-uint 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-logger", + "starcoin-metrics", + "starcoin-state-store-api", + "starcoin-types", + "starcoin-uint", + "starcoin-vm-types", 
"stest", "thiserror", ] -[[package]] -name = "starcoin-storage" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "byteorder", - "chrono", - "coarsetime", - "forkable-jellyfish-merkle 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "lru 0.7.8", - "network-p2p-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "network-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "num_enum", - "once_cell", - "parking_lot 0.12.1", - "rocksdb", - "serde 1.0.152", - "starcoin-accumulator 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-config 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-crypto", - "starcoin-logger 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-metrics 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-state-store-api 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-uint 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "thiserror", -] - [[package]] name = "starcoin-stratum" version = "1.13.5" @@ -10966,12 +10583,12 @@ dependencies = [ "jsonrpc-tcp-server", "serde 1.0.152", "serde_json", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-miner", - "starcoin-service-registry 1.13.5", - "starcoin-types 1.13.5", + "starcoin-service-registry", + "starcoin-types", "stest", ] @@ -10982,52 +10599,52 @@ dependencies = [ "anyhow", "async-std", "async-trait", - "bcs-ext 1.13.5", - "forkable-jellyfish-merkle 1.13.5", + "bcs-ext", + "forkable-jellyfish-merkle", "futures 0.3.26", "futures-retry", "futures-timer", "hex", "itertools", - "network-api 1.13.5", + "network-api", "network-p2p-core", "parking_lot 0.12.1", "pin-project 0.4.30", "pin-utils", "rand 0.8.5", - "starcoin-account-api 1.13.5", - "starcoin-accumulator 1.13.5", + "starcoin-account-api", + "starcoin-accumulator", "starcoin-block-relayer", "starcoin-chain", "starcoin-chain-api", "starcoin-chain-mock", "starcoin-chain-service", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-logger", + "starcoin-metrics", "starcoin-miner", "starcoin-network", "starcoin-network-rpc", "starcoin-network-rpc-api", "starcoin-node", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-service", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-sync-api", - "starcoin-time-service 1.13.5", + "starcoin-time-service", 
"starcoin-transaction-builder", "starcoin-txpool", "starcoin-txpool-api", "starcoin-txpool-mock-service", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", + "starcoin-types", + "starcoin-vm-types", "stest", "stream-task", "sysinfo", @@ -11042,14 +10659,14 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "network-api 1.13.5", + "network-api", "schemars", "serde 1.0.152", - "starcoin-accumulator 1.13.5", + "starcoin-accumulator", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-service-registry 1.13.5", - "starcoin-types 1.13.5", + "starcoin-logger", + "starcoin-service-registry", + "starcoin-types", "stream-task", ] @@ -11061,15 +10678,6 @@ dependencies = [ "systemstat", ] -[[package]] -name = "starcoin-system" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "systemstat", -] - [[package]] name = "starcoin-time-service" version = "1.13.5" @@ -11078,26 +10686,17 @@ dependencies = [ "serde 1.0.152", ] -[[package]] -name = "starcoin-time-service" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "log 0.4.17", - "serde 1.0.152", -] - [[package]] name = "starcoin-transaction-builder" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", - "starcoin-config 1.13.5", + "bcs-ext", + "starcoin-config", "starcoin-crypto", - "starcoin-logger 1.13.5", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-logger", + "starcoin-vm-types", + "stdlib", "stest", ] @@ -11108,7 +10707,7 @@ dependencies = [ "anyhow", "async-trait", "bcs", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "dashmap", "datatest-stable 0.1.3", @@ -11133,9 +10732,9 @@ dependencies = [ "serde 1.0.152", "serde_json", "starcoin-abi-decoder", - "starcoin-accumulator 1.13.5", + "starcoin-accumulator", "starcoin-chain-api", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", "starcoin-dev", "starcoin-genesis", @@ -11146,11 +10745,11 @@ dependencies = [ "starcoin-state-api", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage 1.13.5", - "starcoin-types 1.13.5", + "starcoin-storage", + "starcoin-types", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-vm-types", + "stdlib", "tempfile", "tokio", ] @@ -11163,16 +10762,16 @@ dependencies = [ "clap 3.2.23", "ctrlc", "futures 0.3.26", - "starcoin-account-api 1.13.5", - "starcoin-config 1.13.5", + "starcoin-account-api", + "starcoin-config", "starcoin-crypto", "starcoin-executor", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-rpc-api", "starcoin-rpc-client", "starcoin-state-api", "starcoin-transaction-builder", - "starcoin-types 1.13.5", + "starcoin-types", "tokio", ] @@ -11182,12 +10781,12 @@ version = "1.13.5" dependencies = [ "anyhow", "async-trait", - "forkable-jellyfish-merkle 1.13.5", + "forkable-jellyfish-merkle", "futures 0.3.26", "futures-channel", "linked-hash-map", "log 0.4.17", - "network-api 1.13.5", + "network-api", "parking_lot 0.12.1", "proptest", "proptest-derive", @@ -11195,22 +10794,22 @@ dependencies = [ "rand_core 0.6.4", "serde 1.0.152", "serde_derive", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-logger", + "starcoin-metrics", 
"starcoin-open-block", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-tree", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-transaction-builder", "starcoin-txpool-api", - "starcoin-types 1.13.5", - "stdlib 1.13.5", + "starcoin-types", + "stdlib", "stest", "tempfile", "test-helper", @@ -11229,7 +10828,7 @@ dependencies = [ "schemars", "serde 1.0.152", "starcoin-crypto", - "starcoin-types 1.13.5", + "starcoin-types", ] [[package]] @@ -11241,7 +10840,7 @@ dependencies = [ "futures-channel", "starcoin-crypto", "starcoin-txpool-api", - "starcoin-types 1.13.5", + "starcoin-types", "stest", "tokio", ] @@ -11251,11 +10850,10 @@ name = "starcoin-types" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "byteorder", "bytes 1.4.0", - "consensus-types", - "forkable-jellyfish-merkle 1.13.5", + "forkable-jellyfish-merkle", "hex", "num_enum", "proptest", @@ -11265,34 +10863,10 @@ dependencies = [ "schemars", "serde 1.0.152", "serde_json", - "starcoin-accumulator 1.13.5", - "starcoin-crypto", - "starcoin-uint 1.13.5", - "starcoin-vm-types 1.13.5", - "thiserror", -] - -[[package]] -name = "starcoin-types" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "byteorder", - "bytes 1.4.0", - "forkable-jellyfish-merkle 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "hex", - "num_enum", - "rand 0.8.5", - "rand_core 0.6.4", - "schemars", - "serde 1.0.152", - "serde_json", - "starcoin-accumulator 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-accumulator", "starcoin-crypto", - "starcoin-uint 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-uint", + "starcoin-vm-types", "thiserror", ] @@ -11300,23 +10874,12 @@ dependencies = [ name = "starcoin-uint" version = "1.13.5" dependencies = [ - "bcs-ext 1.13.5", + "bcs-ext", "hex", "serde 1.0.152", "serde_json", "starcoin-crypto", - "starcoin-types 1.13.5", - "uint 0.9.5", -] - -[[package]] -name = "starcoin-uint" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "hex", - "serde 1.0.152", - "starcoin-crypto", + "starcoin-types", "uint 0.9.5", ] @@ -11325,7 +10888,7 @@ name = "starcoin-vm-runtime" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "move-core-types", "move-stdlib", "move-table-extension", @@ -11335,16 +10898,16 @@ dependencies = [ "rand 0.8.5", "rand_core 0.6.4", "serde 1.0.152", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-crypto", - "starcoin-gas 1.13.5", - "starcoin-gas-algebra-ext 1.13.5", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", - "starcoin-natives 1.13.5", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-gas", + "starcoin-gas-algebra-ext", + "starcoin-logger", + "starcoin-metrics", + "starcoin-natives", + "starcoin-types", + "starcoin-vm-types", + "stdlib", 
"tracing", ] @@ -11353,10 +10916,10 @@ name = "starcoin-vm-types" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "bech32", "chrono", - "forkable-jellyfish-merkle 1.13.5", + "forkable-jellyfish-merkle", "hex", "log 0.4.17", "mirai-annotations", @@ -11375,41 +10938,10 @@ dependencies = [ "serde 1.0.152", "serde_bytes", "serde_json", - "starcoin-accumulator 1.13.5", + "starcoin-accumulator", "starcoin-crypto", - "starcoin-gas-algebra-ext 1.13.5", - "starcoin-time-service 1.13.5", -] - -[[package]] -name = "starcoin-vm-types" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "bech32", - "chrono", - "forkable-jellyfish-merkle 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "hex", - "log 0.4.17", - "mirai-annotations", - "move-binary-format", - "move-bytecode-verifier", - "move-core-types", - "move-ir-types", - "move-table-extension", - "move-vm-types", - "num_enum", - "once_cell", - "rand 0.8.5", - "schemars", - "serde 1.0.152", - "serde_bytes", - "serde_json", - "starcoin-accumulator 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-crypto", - "starcoin-gas-algebra-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-gas-algebra-ext", + "starcoin-time-service", ] [[package]] @@ -11423,7 +10955,7 @@ name = "stdlib" version = "1.13.5" dependencies = [ "anyhow", - "bcs-ext 1.13.5", + "bcs-ext", "clap 3.2.23", "datatest-stable 0.1.3", "fs_extra", @@ -11439,35 +10971,8 @@ dependencies = [ "simplelog", "starcoin-crypto", "starcoin-framework", - "starcoin-move-compiler 1.13.5", - "starcoin-vm-types 1.13.5", - "tempfile", - "walkdir", -] - -[[package]] -name = "stdlib" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "anyhow", - "bcs-ext 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "clap 3.2.23", - "fs_extra", - "include_dir", - "itertools", - "log 0.4.17", - "move-bytecode-verifier", - "move-compiler", - "move-prover", - "once_cell", - "serde 1.0.152", - "sha2 0.10.6", - "simplelog", - "starcoin-crypto", - "starcoin-framework", - "starcoin-move-compiler 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", - "starcoin-vm-types 1.13.5 (git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000)", + "starcoin-move-compiler", + "starcoin-vm-types", "tempfile", "walkdir", ] @@ -11481,9 +10986,9 @@ dependencies = [ "anyhow", "futures 0.3.26", "log 0.4.17", - "starcoin-logger 1.13.5", + "starcoin-logger", "stest-macro", - "timeout-join-handler 1.13.5", + "timeout-join-handler", "tokio", ] @@ -11526,7 +11031,7 @@ dependencies = [ "pin-utils", "schemars", "serde 1.0.152", - "starcoin-logger 1.13.5", + "starcoin-logger", "stest", "thiserror", "tokio", @@ -11870,51 +11375,51 @@ dependencies = [ "anyhow", "async-trait", "backtrace", - "bcs-ext 1.13.5", + "bcs-ext", "futures 0.3.26", "futures-timer", "hex", "move-ir-compiler", - "network-api 1.13.5", + "network-api", "network-p2p-core", - 
"network-p2p-types 1.13.5", + "network-p2p-types", "rand 0.8.5", "serde 1.0.152", "serde_json", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-account-service", "starcoin-block-relayer", "starcoin-chain", "starcoin-chain-notify", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-dev", "starcoin-executor", "starcoin-genesis", - "starcoin-logger 1.13.5", - "starcoin-metrics 1.13.5", + "starcoin-logger", + "starcoin-metrics", "starcoin-miner", - "starcoin-move-compiler 1.13.5", + "starcoin-move-compiler", "starcoin-network", "starcoin-network-rpc", "starcoin-network-rpc-api", "starcoin-node", "starcoin-node-api", "starcoin-rpc-server", - "starcoin-service-registry 1.13.5", + "starcoin-service-registry", "starcoin-state-api", "starcoin-state-service", "starcoin-statedb", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-sync", "starcoin-sync-api", "starcoin-transaction-builder", "starcoin-txpool", "starcoin-txpool-api", - "starcoin-types 1.13.5", - "starcoin-vm-types 1.13.5", - "stdlib 1.13.5", + "starcoin-types", + "starcoin-vm-types", + "stdlib", "stest", "thiserror", "tokio", @@ -11934,15 +11439,15 @@ dependencies = [ "serde 1.0.152", "serde_bytes", "serde_json", - "starcoin-account-api 1.13.5", + "starcoin-account-api", "starcoin-account-provider", "starcoin-chain", "starcoin-cmd", - "starcoin-config 1.13.5", + "starcoin-config", "starcoin-consensus", "starcoin-crypto", "starcoin-executor", - "starcoin-logger 1.13.5", + "starcoin-logger", "starcoin-miner", "starcoin-network", "starcoin-node", @@ -11950,12 +11455,12 @@ dependencies = [ "starcoin-rpc-client", "starcoin-rpc-server", "starcoin-state-api", - "starcoin-storage 1.13.5", + "starcoin-storage", "starcoin-transaction-builder", "starcoin-txpool", - "starcoin-types 1.13.5", + "starcoin-types", "starcoin-vm-runtime", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", ] [[package]] @@ -12092,14 +11597,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "timeout-join-handler" -version = "1.13.5" -source = "git+https://github.com/starcoinorg/starcoin?rev=fb6fcc48fdcad3d6f54ed97e004e25099ac24000#fb6fcc48fdcad3d6f54ed97e004e25099ac24000" -dependencies = [ - "thiserror", -] - [[package]] name = "tint" version = "1.0.1" @@ -12432,7 +11929,7 @@ dependencies = [ "serde-generate", "serde-reflection 0.3.2", "serde_yaml", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", "tempfile", "textwrap 0.14.2", "which", @@ -12872,7 +12369,7 @@ dependencies = [ "schemars", "serde 1.0.152", "starcoin-move-explain", - "starcoin-vm-types 1.13.5", + "starcoin-vm-types", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d191bcef88..0c3fd21478 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -247,6 +247,9 @@ bcs-ext = { path = "commons/bcs_ext" } bech32 = "0.9" bencher = "0.1.5" bitflags = "1.3.2" +faster-hex = "0.6" +indexmap = "1.9.1" +bincode = { version = "1", default-features = false } bs58 = "0.3.1" byteorder = "1.3.4" bytes = "1" @@ -513,11 +516,10 @@ walkdir = "2.3.1" wasm-timer = "0.2" which = "4.1.0" zeroize = "1.3.0" -database = { git = "https://github.com/starcoinorg/smolstc", rev = "167d700a0f99ba929cd6d156dac77859306f32da" } -consensus = { git = "https://github.com/starcoinorg/smolstc", rev = "167d700a0f99ba929cd6d156dac77859306f32da" } -ghostdag = { git = "https://github.com/starcoinorg/smolstc", rev = "167d700a0f99ba929cd6d156dac77859306f32da" } -reachability = { git = "https://github.com/starcoinorg/smolstc", rev = 
"167d700a0f99ba929cd6d156dac77859306f32da" } -consensus-types = { git = "https://github.com/starcoinorg/smolstc", rev = "167d700a0f99ba929cd6d156dac77859306f32da" } +dag-database = { path = "storage/dag-database" } +dag-consensus = { path = "consensus/dag-consensus" } +ghostdag = { path = "consensus/dag-consensus/ghostdag" } +reachability = { path = "consensus/dag-consensus/reachability" } [profile.release.package] starcoin-service-registry.debug = 1 diff --git a/chain/Cargo.toml b/chain/Cargo.toml index f3921e1ccb..f89cf8c8d2 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -23,9 +23,8 @@ starcoin-types = { package = "starcoin-types", workspace = true } starcoin-vm-types = { workspace = true } starcoin-storage = { workspace = true } thiserror = { workspace = true } -database = { workspace = true } -consensus = { workspace = true } -consensus-types = { workspace = true } +dag-database = { workspace = true } +dag-consensus = { workspace = true } [dev-dependencies] proptest = { workspace = true } @@ -42,9 +41,8 @@ stdlib = { workspace = true } stest = { workspace = true } test-helper = { workspace = true } tokio = { features = ["full"], workspace = true } -database = { workspace = true } -consensus = { workspace = true } -consensus-types = { workspace = true } +dag-database = { workspace = true } +dag-consensus = { workspace = true } [features] default = [] diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs index b6541f5920..04b91ba088 100644 --- a/chain/api/src/message.rs +++ b/chain/api/src/message.rs @@ -63,7 +63,7 @@ pub enum ChainRequest { GetDagAccumulatorLeaves { start_index: u64, batch_size: u64, - } + }, } impl ServiceRequest for ChainRequest { diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index 7933b91715..f1bfb65eeb 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{format_err, Error, Result}; -use starcoin_chain::BlockChain; use starcoin_chain::dag_chain::DagBlockChain; +use starcoin_chain::BlockChain; use starcoin_chain_api::message::{ChainRequest, ChainResponse}; use starcoin_chain_api::{ ChainReader, ChainWriter, ReadableChainService, TransactionInfoWithProof, @@ -34,7 +34,7 @@ use std::sync::Arc; pub struct ChainReaderService { inner: ChainReaderServiceInner, - // dag_chain: DagBlockChain, + dag_chain: DagBlockChain, } impl ChainReaderService { @@ -45,8 +45,13 @@ impl ChainReaderService { vm_metrics: Option, ) -> Result { Ok(Self { - inner: ChainReaderServiceInner::new(config, startup_info, storage, vm_metrics)?, - // dag_chain: DagBlockChain::new(config, storage, vm_metrics)?, + inner: ChainReaderServiceInner::new( + config.clone(), + startup_info, + storage.clone(), + vm_metrics.clone(), + )?, + dag_chain: DagBlockChain::new(config.clone(), storage.clone(), vm_metrics)?, }) } } @@ -236,8 +241,7 @@ impl ServiceHandler for ChainReaderService { ChainRequest::GetBlockInfos(ids) => Ok(ChainResponse::BlockInfoVec(Box::new( self.inner.get_block_infos(ids)?, ))), - _ => todo!() - // ChainRequest::GetDagAccumulatorLeaves(start_index, batch_size) => Ok(ChainResponse::HashValue(self.dag_)), + _ => todo!(), // ChainRequest::GetDagAccumulatorLeaves(start_index, batch_size) => Ok(ChainResponse::HashValue(self.dag_)), } } } diff --git a/chain/src/dag_chain.rs b/chain/src/dag_chain.rs index 143c7b0020..c660a7ab9d 100644 --- a/chain/src/dag_chain.rs +++ b/chain/src/dag_chain.rs @@ -1,60 +1,68 @@ use std::sync::Arc; -use 
consensus::blockdag::BlockDAG; -use consensus_types::{blockhash::ORIGIN, header::Header}; -use database::prelude::{FlexiDagStorageConfig, FlexiDagStorage}; -use starcoin_accumulator::MerkleAccumulator; +use anyhow::bail; +use dag_consensus::blockdag::BlockDAG; +use dag_database::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; +use starcoin_accumulator::{node::AccumulatorStoreType, MerkleAccumulator}; use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_executor::VMMetrics; -use starcoin_storage::{flexi_dag::{SyncFlexiDagSnapshotStorage, SyncFlexiDagStorage}, Store}; -use starcoin_types::{block::BlockHeader, dag_block::DagBlockHeader}; - - +use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, Store}; +use starcoin_types::block::BlockHeader; +use starcoin_types::{blockhash::ORIGIN, header::Header}; pub struct DagBlockChain { dag: Option, - dag_block_accumulator: MerkleAccumulator, - accumulator_snapshot: Arc, + dag_sync_accumulator: MerkleAccumulator, + sync_accumulator_snapshot: Arc, } - impl DagBlockChain { pub fn new( config: Arc, storage: Arc, vm_metrics: Option, ) -> anyhow::Result { - todo!() // initialize the dag - // let db_path = config.storage.dir(); - // let config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); - // let db = FlexiDagStorage::create_from_path(db_path, config)?; - // let dag = BlockDAG::new(Header::new(DagBlockHeader::random(), vec![HashValue::new(ORIGIN)]), 16, db); + let db_path = config.storage.dir(); + let config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); + let db = FlexiDagStorage::create_from_path(db_path, config)?; + let dag = BlockDAG::new( + Header::new(BlockHeader::random(), vec![HashValue::new(ORIGIN)]), + 16, + db, + ); - // // initialize the block accumulator - // let sync_flexi_dag_store = Arc::new(SyncFlexiDagStorage::new(storage,)?); - // let startup_info = match storage.get_flexi_dag_startup_info()? { - // Some(startup_info) => startup_info, - // None => { - // return Ok(Self { - // dag: Some(dag), - // dag_block_accumulator: MerkleAccumulator::new_empty(sy), - // accumulator_snapshot: sync_flexi_dag_store.get_accumulator_storage(), - // }) - // } - // }; + // initialize the block accumulator + let startup_info = match storage.get_flexi_dag_startup_info()? 
{ + Some(startup_info) => startup_info, + None => { + return Ok(Self { + dag: Some(dag), + dag_sync_accumulator: MerkleAccumulator::new_empty( + storage.get_accumulator_store(AccumulatorStoreType::SyncDag), + ), + sync_accumulator_snapshot: storage.get_accumulator_snapshot_storage(), + }) + } + }; // let accmulator_info = sync_flexi_dag_store.get_snapshot_storage().get(startup_info.main); - - // Ok(Self { - // dag: Some(dag), - // dag_block_accumulator: MerkleAccumulator::new_with_info(accmulator_info, sync_flexi_dag_store.get_accumulator_storage()), - // accumulator_snapshot: Arc::new(SyncFlexiDagSnapshotStorage::new( - // storage, - // )?), - // }) - } + let accumulator_info = match storage.query_by_hash(startup_info.main) { + Ok(op_snapshot) => match op_snapshot { + Some(snapshot) => snapshot.accumulator_info, + None => bail!("failed to get sync accumulator info since it is None"), + }, + Err(error) => bail!("failed to get sync accumulator info: {}", error.to_string()), + }; - -} \ No newline at end of file + Ok(Self { + dag: Some(dag), + dag_sync_accumulator: MerkleAccumulator::new_with_info( + accumulator_info, + storage.get_accumulator_store(AccumulatorStoreType::SyncDag), + ), + sync_accumulator_snapshot: storage.get_accumulator_snapshot_storage(), + }) + } +} diff --git a/consensus/dag-consensus/Cargo.toml b/consensus/dag-consensus/Cargo.toml new file mode 100644 index 0000000000..ac54a0d90d --- /dev/null +++ b/consensus/dag-consensus/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "dag-consensus" +version = "1.13.5" +edition.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ghostdag.workspace = true +reachability.workspace = true +dag-database.workspace = true +parking_lot.workspace = true +starcoin-crypto.workspace = true +starcoin-types.workspace = true +anyhow.workspace = true \ No newline at end of file diff --git a/consensus/dag-consensus/ghostdag/Cargo.toml b/consensus/dag-consensus/ghostdag/Cargo.toml new file mode 100644 index 0000000000..a38b4d4809 --- /dev/null +++ b/consensus/dag-consensus/ghostdag/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "ghostdag" +version = "1.13.5" +edition.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +thiserror.workspace = true +dag-database.workspace = true +starcoin-crypto.workspace = true +starcoin-types.workspace = true +serde.workspace = true +itertools.workspace = true +parking_lot.workspace = true +rocksdb.workspace = true +reachability.workspace = true \ No newline at end of file diff --git a/consensus/dag-consensus/ghostdag/src/lib.rs b/consensus/dag-consensus/ghostdag/src/lib.rs new file mode 100644 index 0000000000..51a2c8fc82 --- /dev/null +++ b/consensus/dag-consensus/ghostdag/src/lib.rs @@ -0,0 +1,4 @@ +pub mod mergeset; +pub mod protocol; + +mod util; diff --git a/consensus/dag-consensus/ghostdag/src/mergeset.rs b/consensus/dag-consensus/ghostdag/src/mergeset.rs new file mode 100644 index 0000000000..f47a221777 --- /dev/null +++ b/consensus/dag-consensus/ghostdag/src/mergeset.rs @@ -0,0 +1,71 @@ +use super::protocol::GhostdagManager; +use dag_database::consensus::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use reachability::reachability_service::ReachabilityService; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::BlockHashSet; +use std::collections::VecDeque; + +impl< + T: GhostdagStoreReader, + S: 
RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, + > GhostdagManager +{ + pub fn ordered_mergeset_without_selected_parent( + &self, + selected_parent: Hash, + parents: &[Hash], + ) -> Vec { + self.sort_blocks(self.unordered_mergeset_without_selected_parent(selected_parent, parents)) + } + + pub fn unordered_mergeset_without_selected_parent( + &self, + selected_parent: Hash, + parents: &[Hash], + ) -> BlockHashSet { + let mut queue: VecDeque<_> = parents + .iter() + .copied() + .filter(|p| p != &selected_parent) + .collect(); + let mut mergeset: BlockHashSet = queue.iter().copied().collect(); + let mut selected_parent_past = BlockHashSet::new(); + + while let Some(current) = queue.pop_front() { + let current_parents = self + .relations_store + .get_parents(current) + .unwrap_or_else(|err| { + println!("WUT"); + panic!("{err:?}"); + }); + + // For each parent of the current block we check whether it is in the past of the selected parent. If not, + // we add it to the resulting merge-set and queue it for further processing. + for parent in current_parents.iter() { + if mergeset.contains(parent) { + continue; + } + + if selected_parent_past.contains(parent) { + continue; + } + + if self + .reachability_service + .is_dag_ancestor_of(*parent, selected_parent) + { + selected_parent_past.insert(*parent); + continue; + } + + mergeset.insert(*parent); + queue.push_back(*parent); + } + } + + mergeset + } +} diff --git a/consensus/dag-consensus/ghostdag/src/protocol.rs b/consensus/dag-consensus/ghostdag/src/protocol.rs new file mode 100644 index 0000000000..4f6a0fb3fd --- /dev/null +++ b/consensus/dag-consensus/ghostdag/src/protocol.rs @@ -0,0 +1,332 @@ +use crate::util::Refs; +use dag_database::consensus::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use reachability::reachability_service::ReachabilityService; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::{ + blockhash::{ + self, BlockHashExtensions, BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType, + }, + ghostdata::GhostdagData, + ordering::*, +}; +use std::sync::Arc; +// For GhostdagStoreReader-related functions, use GhostDagDataWrapper instead. 
+// ascending_mergeset_without_selected_parent +// descending_mergeset_without_selected_parent +// consensus_ordered_mergeset +// consensus_ordered_mergeset_without_selected_parent +//use dag_database::consensus::GhostDagDataWrapper; + +#[derive(Clone)] +pub struct GhostdagManager< + T: GhostdagStoreReader, + S: RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, +> { + genesis_hash: Hash, + pub(super) k: KType, + pub(super) ghostdag_store: T, + pub(super) relations_store: S, + pub(super) headers_store: V, + pub(super) reachability_service: U, +} + +impl< + T: GhostdagStoreReader, + S: RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, + > GhostdagManager +{ + pub fn new( + genesis_hash: Hash, + k: KType, + ghostdag_store: T, + relations_store: S, + headers_store: V, + reachability_service: U, + ) -> Self { + Self { + genesis_hash, + k, + ghostdag_store, + relations_store, + reachability_service, + headers_store, + } + } + + pub fn genesis_ghostdag_data(&self) -> GhostdagData { + GhostdagData::new( + 0, + Default::default(), // TODO: take blue score and work from actual genesis + Hash::new(blockhash::ORIGIN), + BlockHashes::new(Vec::new()), + BlockHashes::new(Vec::new()), + HashKTypeMap::new(BlockHashMap::new()), + ) + } + + pub fn origin_ghostdag_data(&self) -> Arc { + Arc::new(GhostdagData::new( + 0, + Default::default(), + 0.into(), + BlockHashes::new(Vec::new()), + BlockHashes::new(Vec::new()), + HashKTypeMap::new(BlockHashMap::new()), + )) + } + + pub fn find_selected_parent(&self, parents: impl IntoIterator) -> Hash { + parents + .into_iter() + .map(|parent| SortableBlock { + hash: parent, + blue_work: self.ghostdag_store.get_blue_work(parent).unwrap(), + }) + .max() + .unwrap() + .hash + } + + /// Runs the GHOSTDAG protocol and calculates the block GhostdagData by the given parents. + /// The function calculates mergeset blues by iterating over the blocks in + /// the anticone of the new block selected parent (which is the parent with the + /// highest blue work) and adds any block to the blue set if by adding + /// it these conditions will not be violated: + /// + /// 1) |anticone-of-candidate-block ∩ blue-set-of-new-block| ≤ K + /// + /// 2) For every blue block in blue-set-of-new-block: + /// |(anticone-of-blue-block ∩ blue-set-new-block) ∪ {candidate-block}| ≤ K. + /// We validate this condition by maintaining a map blues_anticone_sizes for + /// each block which holds all the blue anticone sizes that were affected by + /// the new added blue blocks. + /// So to find out what is |anticone-of-blue ∩ blue-set-of-new-block| we just iterate in + /// the selected parent chain of the new block until we find an existing entry in + /// blues_anticone_sizes. 
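+    /// For example, with K = 3 a candidate whose anticone already contains four blue blocks of the new block violates condition 1) and is colored red; similarly, it is rejected if adding it would give some existing blue block more than three blues in its blue anticone (condition 2).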
+ /// + /// For further details see the article https://eprint.iacr.org/2018/104.pdf + pub fn ghostdag(&self, parents: &[Hash]) -> GhostdagData { + assert!( + !parents.is_empty(), + "genesis must be added via a call to init" + ); + + // Run the GHOSTDAG parent selection algorithm + let selected_parent = self.find_selected_parent(&mut parents.iter().copied()); + // Initialize new GHOSTDAG block data with the selected parent + let mut new_block_data = GhostdagData::new_with_selected_parent(selected_parent, self.k); + // Get the mergeset in consensus-agreed topological order (topological here means forward in time from blocks to children) + let ordered_mergeset = + self.ordered_mergeset_without_selected_parent(selected_parent, parents); + + for blue_candidate in ordered_mergeset.iter().cloned() { + let coloring = self.check_blue_candidate(&new_block_data, blue_candidate); + + if let ColoringOutput::Blue(blue_anticone_size, blues_anticone_sizes) = coloring { + // No k-cluster violation found, we can now set the candidate block as blue + new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes); + } else { + new_block_data.add_red(blue_candidate); + } + } + + let blue_score = self.ghostdag_store.get_blue_score(selected_parent).unwrap() + + new_block_data.mergeset_blues.len() as u64; + + let added_blue_work: BlueWorkType = new_block_data + .mergeset_blues + .iter() + .cloned() + .map(|hash| { + if hash.is_origin() { + 0u128 + } else { + //TODO: implement caculate pow work + let _difficulty = self.headers_store.get_difficulty(hash).unwrap(); + 1024u128 + } + }) + .sum(); + + let blue_work = + self.ghostdag_store.get_blue_work(selected_parent).unwrap() + added_blue_work; + new_block_data.finalize_score_and_work(blue_score, blue_work); + + new_block_data + } + + fn check_blue_candidate_with_chain_block( + &self, + new_block_data: &GhostdagData, + chain_block: &ChainBlock, + blue_candidate: Hash, + candidate_blues_anticone_sizes: &mut BlockHashMap, + candidate_blue_anticone_size: &mut KType, + ) -> ColoringState { + // If blue_candidate is in the future of chain_block, it means + // that all remaining blues are in the past of chain_block and thus + // in the past of blue_candidate. In this case we know for sure that + // the anticone of blue_candidate will not exceed K, and we can mark + // it as blue. + // + // The new block is always in the future of blue_candidate, so there's + // no point in checking it. + + // We check if chain_block is not the new block by checking if it has a hash. + if let Some(hash) = chain_block.hash { + if self + .reachability_service + .is_dag_ancestor_of(hash, blue_candidate) + { + return ColoringState::Blue; + } + } + + for &block in chain_block.data.mergeset_blues.iter() { + // Skip blocks that exist in the past of blue_candidate. + if self + .reachability_service + .is_dag_ancestor_of(block, blue_candidate) + { + continue; + } + + candidate_blues_anticone_sizes + .insert(block, self.blue_anticone_size(block, new_block_data)); + + *candidate_blue_anticone_size += 1; + if *candidate_blue_anticone_size > self.k { + // k-cluster violation: The candidate's blue anticone exceeded k + return ColoringState::Red; + } + + if *candidate_blues_anticone_sizes.get(&block).unwrap() == self.k { + // k-cluster violation: A block in candidate's blue anticone already + // has k blue blocks in its own anticone + return ColoringState::Red; + } + + // This is a sanity check that validates that a blue + // block's blue anticone is not already larger than K. 
+ assert!( + *candidate_blues_anticone_sizes.get(&block).unwrap() <= self.k, + "found blue anticone larger than K" + ); + } + + ColoringState::Pending + } + + /// Returns the blue anticone size of `block` from the worldview of `context`. + /// Expects `block` to be in the blue set of `context` + fn blue_anticone_size(&self, block: Hash, context: &GhostdagData) -> KType { + let mut current_blues_anticone_sizes = HashKTypeMap::clone(&context.blues_anticone_sizes); + let mut current_selected_parent = context.selected_parent; + loop { + if let Some(size) = current_blues_anticone_sizes.get(&block) { + return *size; + } + + if current_selected_parent == self.genesis_hash + || current_selected_parent == Hash::new(blockhash::ORIGIN) + { + panic!("block {block} is not in blue set of the given context"); + } + + current_blues_anticone_sizes = self + .ghostdag_store + .get_blues_anticone_sizes(current_selected_parent) + .unwrap(); + current_selected_parent = self + .ghostdag_store + .get_selected_parent(current_selected_parent) + .unwrap(); + } + } + + fn check_blue_candidate( + &self, + new_block_data: &GhostdagData, + blue_candidate: Hash, + ) -> ColoringOutput { + // The maximum length of new_block_data.mergeset_blues can be K+1 because + // it contains the selected parent. + if new_block_data.mergeset_blues.len() as KType == self.k + 1 { + return ColoringOutput::Red; + } + + let mut candidate_blues_anticone_sizes: BlockHashMap = + BlockHashMap::with_capacity(self.k as usize); + // Iterate over all blocks in the blue past of the new block that are not in the past + // of blue_candidate, and check for each one of them if blue_candidate potentially + // enlarges their blue anticone to be over K, or that they enlarge the blue anticone + // of blue_candidate to be over K. 
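+        // Walk back along the selected-parent chain, starting from the new block itself (represented with `hash: None`), checking the candidate against each chain block until its coloring is decided.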
+ let mut chain_block = ChainBlock { + hash: None, + data: new_block_data.into(), + }; + let mut candidate_blue_anticone_size: KType = 0; + + loop { + let state = self.check_blue_candidate_with_chain_block( + new_block_data, + &chain_block, + blue_candidate, + &mut candidate_blues_anticone_sizes, + &mut candidate_blue_anticone_size, + ); + + match state { + ColoringState::Blue => { + return ColoringOutput::Blue( + candidate_blue_anticone_size, + candidate_blues_anticone_sizes, + ) + } + ColoringState::Red => return ColoringOutput::Red, + ColoringState::Pending => (), // continue looping + } + + chain_block = ChainBlock { + hash: Some(chain_block.data.selected_parent), + data: self + .ghostdag_store + .get_data(chain_block.data.selected_parent) + .unwrap() + .into(), + } + } + } + + pub fn sort_blocks(&self, blocks: impl IntoIterator) -> Vec { + let mut sorted_blocks: Vec = blocks.into_iter().collect(); + sorted_blocks.sort_by_cached_key(|block| SortableBlock { + hash: *block, + blue_work: self.ghostdag_store.get_blue_work(*block).unwrap(), + }); + sorted_blocks + } +} + +/// Chain block with attached ghostdag data +struct ChainBlock<'a> { + hash: Option, // if set to `None`, signals being the new block + data: Refs<'a, GhostdagData>, +} + +/// Represents the intermediate GHOSTDAG coloring state for the current candidate +enum ColoringState { + Blue, + Red, + Pending, +} + +/// Represents the final output of GHOSTDAG coloring for the current candidate +enum ColoringOutput { + Blue(KType, BlockHashMap), // (blue anticone size, map of blue anticone sizes for each affected blue) + Red, +} diff --git a/consensus/dag-consensus/ghostdag/src/util.rs b/consensus/dag-consensus/ghostdag/src/util.rs new file mode 100644 index 0000000000..68eb4b9b31 --- /dev/null +++ b/consensus/dag-consensus/ghostdag/src/util.rs @@ -0,0 +1,57 @@ +use std::{ops::Deref, rc::Rc, sync::Arc}; +/// Enum used to represent a concrete varying pointer type which only needs to be accessed by ref. 
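+/// For example, the ghostdag protocol wraps `GhostdagData` in `Refs` so a chain block can hold either a borrowed value or one handed out by the store behind the same type.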
+/// We avoid adding a `Val(T)` variant in order to keep the size of the enum minimal +pub enum Refs<'a, T> { + Ref(&'a T), + Arc(Arc), + Rc(Rc), + Box(Box), +} + +impl AsRef for Refs<'_, T> { + fn as_ref(&self) -> &T { + match self { + Refs::Ref(r) => r, + Refs::Arc(a) => a, + Refs::Rc(r) => r, + Refs::Box(b) => b, + } + } +} + +impl Deref for Refs<'_, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + match self { + Refs::Ref(r) => r, + Refs::Arc(a) => a, + Refs::Rc(r) => r, + Refs::Box(b) => b, + } + } +} + +impl<'a, T> From<&'a T> for Refs<'a, T> { + fn from(r: &'a T) -> Self { + Self::Ref(r) + } +} + +impl From> for Refs<'_, T> { + fn from(a: Arc) -> Self { + Self::Arc(a) + } +} + +impl From> for Refs<'_, T> { + fn from(r: Rc) -> Self { + Self::Rc(r) + } +} + +impl From> for Refs<'_, T> { + fn from(b: Box) -> Self { + Self::Box(b) + } +} diff --git a/consensus/dag-consensus/reachability/Cargo.toml b/consensus/dag-consensus/reachability/Cargo.toml new file mode 100644 index 0000000000..f65bd6e222 --- /dev/null +++ b/consensus/dag-consensus/reachability/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "reachability" +version = "1.13.5" +edition.workspace = true +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +thiserror.workspace = true +dag-database.workspace = true +starcoin-crypto.workspace = true +starcoin-types.workspace = true +serde.workspace = true +itertools.workspace = true +parking_lot.workspace = true +rocksdb.workspace = true +starcoin-storage.workspace = true diff --git a/consensus/dag-consensus/reachability/src/extensions.rs b/consensus/dag-consensus/reachability/src/extensions.rs new file mode 100644 index 0000000000..829d1a855e --- /dev/null +++ b/consensus/dag-consensus/reachability/src/extensions.rs @@ -0,0 +1,44 @@ +use dag_database::{consensus::ReachabilityStoreReader, prelude::StoreResult}; +use starcoin_crypto::hash::HashValue as Hash; +use starcoin_types::interval::Interval; + +pub(super) trait ReachabilityStoreIntervalExtensions { + fn interval_children_capacity(&self, block: Hash) -> StoreResult; + fn interval_remaining_before(&self, block: Hash) -> StoreResult; + fn interval_remaining_after(&self, block: Hash) -> StoreResult; +} + +impl ReachabilityStoreIntervalExtensions for T { + /// Returns the reachability allocation capacity for children of `block` + fn interval_children_capacity(&self, block: Hash) -> StoreResult { + // The interval of a block should *strictly* contain the intervals of its + // tree children, hence we subtract 1 from the end of the range. 
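+        // For example, a block holding the interval [101, 200] can hand out at most
+        // [101, 199] to its tree children.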
+ Ok(self.get_interval(block)?.decrease_end(1)) + } + + /// Returns the available interval to allocate for tree children, taken from the + /// beginning of children allocation capacity + fn interval_remaining_before(&self, block: Hash) -> StoreResult { + let alloc_capacity = self.interval_children_capacity(block)?; + match self.get_children(block)?.first() { + Some(first_child) => { + let first_alloc = self.get_interval(*first_child)?; + Ok(Interval::new(alloc_capacity.start, first_alloc.start - 1)) + } + None => Ok(alloc_capacity), + } + } + + /// Returns the available interval to allocate for tree children, taken from the + /// end of children allocation capacity + fn interval_remaining_after(&self, block: Hash) -> StoreResult { + let alloc_capacity = self.interval_children_capacity(block)?; + match self.get_children(block)?.last() { + Some(last_child) => { + let last_alloc = self.get_interval(*last_child)?; + Ok(Interval::new(last_alloc.end + 1, alloc_capacity.end)) + } + None => Ok(alloc_capacity), + } + } +} diff --git a/consensus/dag-consensus/reachability/src/inquirer.rs b/consensus/dag-consensus/reachability/src/inquirer.rs new file mode 100644 index 0000000000..57f0960c2f --- /dev/null +++ b/consensus/dag-consensus/reachability/src/inquirer.rs @@ -0,0 +1,335 @@ +use super::{tree::*, *}; +use dag_database::consensus::{ReachabilityStore, ReachabilityStoreReader}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::{blockhash, interval::Interval, perf}; + +/// Init the reachability store to match the state required by the algorithmic layer. +/// The function first checks the store for possibly being initialized already. +pub fn init(store: &mut (impl ReachabilityStore + ?Sized)) -> Result<()> { + init_with_params(store, Hash::new(blockhash::ORIGIN), Interval::maximal()) +} + +pub(super) fn init_with_params( + store: &mut (impl ReachabilityStore + ?Sized), + origin: Hash, + capacity: Interval, +) -> Result<()> { + if store.has(origin)? { + return Ok(()); + } + store.init(origin, capacity)?; + Ok(()) +} + +type HashIterator<'a> = &'a mut dyn Iterator; + +/// Add a block to the DAG reachability data structures and persist using the provided `store`. +pub fn add_block( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + selected_parent: Hash, + mergeset_iterator: HashIterator, +) -> Result<()> { + add_block_with_params( + store, + new_block, + selected_parent, + mergeset_iterator, + None, + None, + ) +} + +fn add_block_with_params( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + selected_parent: Hash, + mergeset_iterator: HashIterator, + reindex_depth: Option, + reindex_slack: Option, +) -> Result<()> { + add_tree_block( + store, + new_block, + selected_parent, + reindex_depth.unwrap_or(perf::DEFAULT_REINDEX_DEPTH), + reindex_slack.unwrap_or(perf::DEFAULT_REINDEX_SLACK), + )?; + add_dag_block(store, new_block, mergeset_iterator)?; + Ok(()) +} + +fn add_dag_block( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + mergeset_iterator: HashIterator, +) -> Result<()> { + // Update the future covering set for blocks in the mergeset + for merged_block in mergeset_iterator { + insert_to_future_covering_set(store, merged_block, new_block)?; + } + Ok(()) +} + +fn insert_to_future_covering_set( + store: &mut (impl ReachabilityStore + ?Sized), + merged_block: Hash, + new_block: Hash, +) -> Result<()> { + match binary_search_descendant( + store, + store.get_future_covering_set(merged_block)?.as_slice(), + new_block, + )? 
{ + // We expect the query to not succeed, and to only return the correct insertion index. + // The existences of a `future covering item` (`FCI`) which is a chain ancestor of `new_block` + // contradicts `merged_block ∈ mergeset(new_block)`. Similarly, the existence of an FCI + // which `new_block` is a chain ancestor of, contradicts processing order. + SearchOutput::Found(_, _) => Err(ReachabilityError::DataInconsistency), + SearchOutput::NotFound(i) => { + store.insert_future_covering_item(merged_block, new_block, i)?; + Ok(()) + } + } +} + +/// Hint to the reachability algorithm that `hint` is a candidate to become +/// the `virtual selected parent` (`VSP`). This might affect internal reachability heuristics such +/// as moving the reindex point. The consensus runtime is expected to call this function +/// for a new header selected tip which is `header only` / `pending UTXO verification`, or for a completely resolved `VSP`. +pub fn hint_virtual_selected_parent( + store: &mut (impl ReachabilityStore + ?Sized), + hint: Hash, +) -> Result<()> { + try_advancing_reindex_root( + store, + hint, + perf::DEFAULT_REINDEX_DEPTH, + perf::DEFAULT_REINDEX_SLACK, + ) +} + +/// Checks if the `this` block is a strict chain ancestor of the `queried` block (aka `this ∈ chain(queried)`). +/// Note that this results in `false` if `this == queried` +pub fn is_strict_chain_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + Ok(store + .get_interval(this)? + .strictly_contains(store.get_interval(queried)?)) +} + +/// Checks if `this` block is a chain ancestor of `queried` block (aka `this ∈ chain(queried) ∪ {queried}`). +/// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself. +pub fn is_chain_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + Ok(store + .get_interval(this)? + .contains(store.get_interval(queried)?)) +} + +/// Returns true if `this` is a DAG ancestor of `queried` (aka `queried ∈ future(this) ∪ {this}`). +/// Note: this method will return true if `this == queried`. +/// The complexity of this method is O(log(|future_covering_set(this)|)) +pub fn is_dag_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + // First, check if `this` is a chain ancestor of queried + if is_chain_ancestor_of(store, this, queried)? { + return Ok(true); + } + // Otherwise, use previously registered future blocks to complete the + // DAG reachability test + match binary_search_descendant( + store, + store.get_future_covering_set(this)?.as_slice(), + queried, + )? { + SearchOutput::Found(_, _) => Ok(true), + SearchOutput::NotFound(_) => Ok(false), + } +} + +/// Finds the child of `ancestor` which is also a chain ancestor of `descendant`. +pub fn get_next_chain_ancestor( + store: &(impl ReachabilityStoreReader + ?Sized), + descendant: Hash, + ancestor: Hash, +) -> Result { + if descendant == ancestor { + // The next ancestor does not exist + return Err(ReachabilityError::BadQuery); + } + if !is_strict_chain_ancestor_of(store, ancestor, descendant)? 
{ + // `ancestor` isn't actually a chain ancestor of `descendant`, so by def + // we cannot find the next ancestor as well + return Err(ReachabilityError::BadQuery); + } + + get_next_chain_ancestor_unchecked(store, descendant, ancestor) +} + +/// Note: it is important to keep the unchecked version for internal module use, +/// since in some scenarios during reindexing `descendant` might have a modified +/// interval which was not propagated yet. +pub(super) fn get_next_chain_ancestor_unchecked( + store: &(impl ReachabilityStoreReader + ?Sized), + descendant: Hash, + ancestor: Hash, +) -> Result { + match binary_search_descendant(store, store.get_children(ancestor)?.as_slice(), descendant)? { + SearchOutput::Found(hash, _) => Ok(hash), + SearchOutput::NotFound(_) => Err(ReachabilityError::BadQuery), + } +} + +enum SearchOutput { + NotFound(usize), // `usize` is the position to insert at + Found(Hash, usize), +} + +fn binary_search_descendant( + store: &(impl ReachabilityStoreReader + ?Sized), + ordered_hashes: &[Hash], + descendant: Hash, +) -> Result { + if cfg!(debug_assertions) { + // This is a linearly expensive assertion, keep it debug only + assert_hashes_ordered(store, ordered_hashes); + } + + // `Interval::end` represents the unique number allocated to this block + let point = store.get_interval(descendant)?.end; + + // We use an `unwrap` here since otherwise we need to implement `binary_search` + // ourselves, which is not worth the effort given that this would be an unrecoverable + // error anyhow + match ordered_hashes.binary_search_by_key(&point, |c| store.get_interval(*c).unwrap().start) { + Ok(i) => Ok(SearchOutput::Found(ordered_hashes[i], i)), + Err(i) => { + // `i` is where `point` was expected (i.e., point < ordered_hashes[i].interval.start), + // so we expect `ordered_hashes[i - 1].interval` to be the only candidate to contain `point` + if i > 0 && is_chain_ancestor_of(store, ordered_hashes[i - 1], descendant)? 
{ + Ok(SearchOutput::Found(ordered_hashes[i - 1], i - 1)) + } else { + Ok(SearchOutput::NotFound(i)) + } + } + } +} + +fn assert_hashes_ordered(store: &(impl ReachabilityStoreReader + ?Sized), ordered_hashes: &[Hash]) { + let intervals: Vec = ordered_hashes + .iter() + .cloned() + .map(|c| store.get_interval(c).unwrap()) + .collect(); + debug_assert!(intervals + .as_slice() + .windows(2) + .all(|w| w[0].end < w[1].start)) +} + +#[cfg(test)] +mod tests { + use super::{super::tests::*, *}; + use dag_database::consensus::MemoryReachabilityStore; + use starcoin_types::blockhash::ORIGIN; + + #[test] + fn test_add_tree_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + // Act + let root: Hash = 1.into(); + TreeBuilder::new(&mut store) + .init_with_params(root, Interval::new(1, 15)) + .add_block(2.into(), root) + .add_block(3.into(), 2.into()) + .add_block(4.into(), 2.into()) + .add_block(5.into(), 3.into()) + .add_block(6.into(), 5.into()) + .add_block(7.into(), 1.into()) + .add_block(8.into(), 6.into()) + .add_block(9.into(), 6.into()) + .add_block(10.into(), 6.into()) + .add_block(11.into(), 6.into()); + // Assert + store.validate_intervals(root).unwrap(); + } + + #[test] + fn test_add_early_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + + // Act + let root: Hash = Hash::from_u64(1); + let mut builder = TreeBuilder::new_with_params(&mut store, 2, 5); + builder.init_with_params(root, Interval::maximal()); + for i in 2u64..100 { + builder.add_block(Hash::from_u64(i), Hash::from_u64(i / 2)); + } + + // Should trigger an earlier than reindex root allocation + builder.add_block(Hash::from_u64(100), Hash::from_u64(2)); + store.validate_intervals(root).unwrap(); + } + + #[test] + fn test_add_dag_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + let origin_hash = Hash::new(ORIGIN); + // Act + DagBuilder::new(&mut store) + .init() + .add_block(DagBlock::new(1.into(), vec![origin_hash])) + .add_block(DagBlock::new(2.into(), vec![1.into()])) + .add_block(DagBlock::new(3.into(), vec![1.into()])) + .add_block(DagBlock::new(4.into(), vec![2.into(), 3.into()])) + .add_block(DagBlock::new(5.into(), vec![4.into()])) + .add_block(DagBlock::new(6.into(), vec![1.into()])) + .add_block(DagBlock::new(7.into(), vec![5.into(), 6.into()])) + .add_block(DagBlock::new(8.into(), vec![1.into()])) + .add_block(DagBlock::new(9.into(), vec![1.into()])) + .add_block(DagBlock::new(10.into(), vec![7.into(), 8.into(), 9.into()])) + .add_block(DagBlock::new(11.into(), vec![1.into()])) + .add_block(DagBlock::new(12.into(), vec![11.into(), 10.into()])); + + // Assert intervals + store.validate_intervals(origin_hash).unwrap(); + + // Assert genesis + for i in 2u64..=12 { + assert!(store.in_past_of(1, i)); + } + + // Assert some futures + assert!(store.in_past_of(2, 4)); + assert!(store.in_past_of(2, 5)); + assert!(store.in_past_of(2, 7)); + assert!(store.in_past_of(5, 10)); + assert!(store.in_past_of(6, 10)); + assert!(store.in_past_of(10, 12)); + assert!(store.in_past_of(11, 12)); + + // Assert some anticones + assert!(store.are_anticone(2, 3)); + assert!(store.are_anticone(2, 6)); + assert!(store.are_anticone(3, 6)); + assert!(store.are_anticone(5, 6)); + assert!(store.are_anticone(3, 8)); + assert!(store.are_anticone(11, 2)); + assert!(store.are_anticone(11, 4)); + assert!(store.are_anticone(11, 6)); + assert!(store.are_anticone(11, 9)); + } +} diff --git a/consensus/dag-consensus/reachability/src/lib.rs 
b/consensus/dag-consensus/reachability/src/lib.rs new file mode 100644 index 0000000000..69510709e6 --- /dev/null +++ b/consensus/dag-consensus/reachability/src/lib.rs @@ -0,0 +1,50 @@ +mod extensions; +pub mod inquirer; +pub mod reachability_service; +mod reindex; +pub mod relations_service; + +#[cfg(test)] +mod tests; +mod tree; + +use dag_database::prelude::StoreError; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum ReachabilityError { + #[error("data store error")] + StoreError(#[from] StoreError), + + #[error("data overflow error")] + DataOverflow(String), + + #[error("data inconsistency error")] + DataInconsistency, + + #[error("query is inconsistent")] + BadQuery, +} + +impl ReachabilityError { + pub fn is_key_not_found(&self) -> bool { + matches!(self, ReachabilityError::StoreError(e) if matches!(e, StoreError::KeyNotFound(_))) + } +} + +pub type Result = std::result::Result; + +pub trait ReachabilityResultExtensions { + /// Unwraps the error into `None` if the internal error is `StoreError::KeyNotFound` or panics otherwise + fn unwrap_option(self) -> Option; +} + +impl ReachabilityResultExtensions for Result { + fn unwrap_option(self) -> Option { + match self { + Ok(value) => Some(value), + Err(err) if err.is_key_not_found() => None, + Err(err) => panic!("Unexpected reachability error: {err:?}"), + } + } +} diff --git a/consensus/dag-consensus/reachability/src/reachability_service.rs b/consensus/dag-consensus/reachability/src/reachability_service.rs new file mode 100644 index 0000000000..bf5c4e1a8b --- /dev/null +++ b/consensus/dag-consensus/reachability/src/reachability_service.rs @@ -0,0 +1,316 @@ +use crate::{inquirer, Result}; +use dag_database::consensus::ReachabilityStoreReader; +use parking_lot::RwLock; +use starcoin_crypto::{HashValue as Hash, HashValue}; +use starcoin_types::blockhash; +use std::{ops::Deref, sync::Arc}; + +pub trait ReachabilityService { + fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result; + fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator) -> bool; + fn is_any_dag_ancestor(&self, list: &mut impl Iterator, queried: Hash) -> bool; + fn is_any_dag_ancestor_result( + &self, + list: &mut impl Iterator, + queried: Hash, + ) -> Result; + fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash; +} + +/// Multi-threaded reachability service imp +#[derive(Clone)] +pub struct MTReachabilityService { + store: Arc>, +} + +impl MTReachabilityService { + pub fn new(store: Arc>) -> Self { + Self { store } + } +} + +impl ReachabilityService for MTReachabilityService { + fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool { + let read_guard = self.store.read(); + inquirer::is_chain_ancestor_of(read_guard.deref(), this, queried).unwrap() + } + + fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result { + let read_guard = self.store.read(); + inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried) + } + + fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool { + let read_guard = self.store.read(); + inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried).unwrap() + } + + fn is_any_dag_ancestor(&self, list: &mut impl Iterator, queried: Hash) -> bool { + let read_guard = self.store.read(); + list.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried).unwrap()) + } + + fn 
is_any_dag_ancestor_result( + &self, + list: &mut impl Iterator, + queried: Hash, + ) -> Result { + let read_guard = self.store.read(); + for hash in list { + if inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried)? { + return Ok(true); + } + } + Ok(false) + } + + fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator) -> bool { + let read_guard = self.store.read(); + queried.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), this, hash).unwrap()) + } + + fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash { + let read_guard = self.store.read(); + inquirer::get_next_chain_ancestor(read_guard.deref(), descendant, ancestor).unwrap() + } +} + +impl MTReachabilityService { + /// Returns a forward iterator walking up the chain-selection tree from `from_ancestor` + /// to `to_descendant`, where `to_descendant` is included if `inclusive` is set to true. + /// + /// To skip `from_ancestor` simply apply `skip(1)`. + /// + /// The caller is expected to verify that `from_ancestor` is indeed a chain ancestor of + /// `to_descendant`, otherwise the function will panic. + pub fn forward_chain_iterator( + &self, + from_ancestor: Hash, + to_descendant: Hash, + inclusive: bool, + ) -> impl Iterator { + ForwardChainIterator::new(self.store.clone(), from_ancestor, to_descendant, inclusive) + } + + /// Returns a backward iterator walking down the selected chain from `from_descendant` + /// to `to_ancestor`, where `to_ancestor` is included if `inclusive` is set to true. + /// + /// To skip `from_descendant` simply apply `skip(1)`. + /// + /// The caller is expected to verify that `to_ancestor` is indeed a chain ancestor of + /// `from_descendant`, otherwise the function will panic. + pub fn backward_chain_iterator( + &self, + from_descendant: Hash, + to_ancestor: Hash, + inclusive: bool, + ) -> impl Iterator { + BackwardChainIterator::new(self.store.clone(), from_descendant, to_ancestor, inclusive) + } + + /// Returns the default chain iterator, walking from `from` backward down the + /// selected chain until `virtual genesis` (aka `blockhash::ORIGIN`; exclusive) + pub fn default_backward_chain_iterator(&self, from: Hash) -> impl Iterator { + BackwardChainIterator::new( + self.store.clone(), + from, + HashValue::new(blockhash::ORIGIN), + false, + ) + } +} + +/// Iterator design: we currently read-lock at each movement of the iterator. +/// Other options are to keep the read guard throughout the iterator lifetime, or +/// a compromise where the lock is released every constant number of items. 
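+/// `BackwardChainIterator` below steps via `get_parent`, while `ForwardChainIterator`
+/// resolves each hop with `get_next_chain_ancestor`, so a forward step additionally
+/// performs a binary search over the ancestor's children.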
+struct BackwardChainIterator { + store: Arc>, + current: Option, + ancestor: Hash, + inclusive: bool, +} + +impl BackwardChainIterator { + fn new( + store: Arc>, + from_descendant: Hash, + to_ancestor: Hash, + inclusive: bool, + ) -> Self { + Self { + store, + current: Some(from_descendant), + ancestor: to_ancestor, + inclusive, + } + } +} + +impl Iterator for BackwardChainIterator { + type Item = Hash; + + fn next(&mut self) -> Option { + if let Some(current) = self.current { + if current == self.ancestor { + if self.inclusive { + self.current = None; + Some(current) + } else { + self.current = None; + None + } + } else { + debug_assert_ne!(current, HashValue::new(blockhash::NONE)); + let next = self.store.read().get_parent(current).unwrap(); + self.current = Some(next); + Some(current) + } + } else { + None + } + } +} + +struct ForwardChainIterator { + store: Arc>, + current: Option, + descendant: Hash, + inclusive: bool, +} + +impl ForwardChainIterator { + fn new( + store: Arc>, + from_ancestor: Hash, + to_descendant: Hash, + inclusive: bool, + ) -> Self { + Self { + store, + current: Some(from_ancestor), + descendant: to_descendant, + inclusive, + } + } +} + +impl Iterator for ForwardChainIterator { + type Item = Hash; + + fn next(&mut self) -> Option { + if let Some(current) = self.current { + if current == self.descendant { + if self.inclusive { + self.current = None; + Some(current) + } else { + self.current = None; + None + } + } else { + let next = inquirer::get_next_chain_ancestor( + self.store.read().deref(), + self.descendant, + current, + ) + .unwrap(); + self.current = Some(next); + Some(current) + } + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::TreeBuilder; + use dag_database::consensus::MemoryReachabilityStore; + use starcoin_types::interval::Interval; + + #[test] + fn test_forward_iterator() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + + // Act + let root: Hash = 1.into(); + TreeBuilder::new(&mut store) + .init_with_params(root, Interval::new(1, 15)) + .add_block(2.into(), root) + .add_block(3.into(), 2.into()) + .add_block(4.into(), 2.into()) + .add_block(5.into(), 3.into()) + .add_block(6.into(), 5.into()) + .add_block(7.into(), 1.into()) + .add_block(8.into(), 6.into()) + .add_block(9.into(), 6.into()) + .add_block(10.into(), 6.into()) + .add_block(11.into(), 6.into()); + + let service = MTReachabilityService::new(Arc::new(RwLock::new(store))); + + // Exclusive + let iter = service.forward_chain_iterator(2.into(), 10.into(), false); + + // Assert + let expected_hashes = [2u64, 3, 5, 6].map(Hash::from); + assert!(expected_hashes.iter().cloned().eq(iter)); + + // Inclusive + let iter = service.forward_chain_iterator(2.into(), 10.into(), true); + + // Assert + let expected_hashes = [2u64, 3, 5, 6, 10].map(Hash::from); + assert!(expected_hashes.iter().cloned().eq(iter)); + + // Compare backward to reversed forward + let forward_iter = service.forward_chain_iterator(2.into(), 10.into(), true); + let backward_iter: Vec = service + .backward_chain_iterator(10.into(), 2.into(), true) + .collect(); + assert!(forward_iter.eq(backward_iter.iter().cloned().rev())) + } + + #[test] + fn test_iterator_boundaries() { + // Arrange & Act + let mut store = MemoryReachabilityStore::new(); + let root: Hash = 1.into(); + TreeBuilder::new(&mut store) + .init_with_params(root, Interval::new(1, 5)) + .add_block(2.into(), root); + + let service = MTReachabilityService::new(Arc::new(RwLock::new(store))); + + // Asserts + 
assert!([1u64, 2] + .map(Hash::from) + .iter() + .cloned() + .eq(service.forward_chain_iterator(1.into(), 2.into(), true))); + assert!([1u64] + .map(Hash::from) + .iter() + .cloned() + .eq(service.forward_chain_iterator(1.into(), 2.into(), false))); + assert!([2u64, 1] + .map(Hash::from) + .iter() + .cloned() + .eq(service.backward_chain_iterator(2.into(), root, true))); + assert!([2u64] + .map(Hash::from) + .iter() + .cloned() + .eq(service.backward_chain_iterator(2.into(), root, false))); + assert!(std::iter::once(root).eq(service.backward_chain_iterator(root, root, true))); + assert!(std::iter::empty::().eq(service.backward_chain_iterator(root, root, false))); + assert!(std::iter::once(root).eq(service.forward_chain_iterator(root, root, true))); + assert!(std::iter::empty::().eq(service.forward_chain_iterator(root, root, false))); + } +} diff --git a/consensus/dag-consensus/reachability/src/reindex.rs b/consensus/dag-consensus/reachability/src/reindex.rs new file mode 100644 index 0000000000..342517e86a --- /dev/null +++ b/consensus/dag-consensus/reachability/src/reindex.rs @@ -0,0 +1,638 @@ +use crate::{ + extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, +}; +use dag_database::consensus::ReachabilityStore; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::{ + blockhash::{BlockHashExtensions, BlockHashMap}, + interval::Interval, +}; +use std::collections::VecDeque; + +/// A struct used during reindex operations. It represents a temporary context +/// for caching subtree information during the *current* reindex operation only +pub(super) struct ReindexOperationContext<'a, T: ReachabilityStore + ?Sized> { + store: &'a mut T, + subtree_sizes: BlockHashMap, // Cache for subtree sizes computed during this operation + _depth: u64, + slack: u64, +} + +impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { + pub(super) fn new(store: &'a mut T, depth: u64, slack: u64) -> Self { + Self { + store, + subtree_sizes: BlockHashMap::new(), + _depth: depth, + slack, + } + } + + /// Traverses the reachability subtree that's defined by the new child + /// block and reallocates reachability interval space + /// such that another reindexing is unlikely to occur shortly + /// thereafter. It does this by traversing down the reachability + /// tree until it finds a block with an interval size that's greater than + /// its subtree size. See `propagate_interval` for further details. + pub(super) fn reindex_intervals(&mut self, new_child: Hash, reindex_root: Hash) -> Result<()> { + let mut current = new_child; + + // Search for the first ancestor with sufficient interval space + loop { + let current_interval = self.store.get_interval(current)?; + self.count_subtrees(current)?; + + // `current` has sufficient space, break and propagate + if current_interval.size() >= self.subtree_sizes[¤t] { + break; + } + + let parent = self.store.get_parent(current)?; + + if parent.is_none() { + // If we ended up here it means that there are more + // than 2^64 blocks, which shouldn't ever happen. + return Err(ReachabilityError::DataOverflow( + "missing tree + parent during reindexing. Theoretically, this + should only ever happen if there are more + than 2^64 blocks in the DAG." + .to_string(), + )); + } + + if current == reindex_root { + // Reindex root is expected to hold enough capacity as long as there are less + // than ~2^52 blocks in the DAG, which should never happen in our lifetimes + // even if block rate per second is above 100. 
The calculation follows from the allocation of + // 2^12 (which equals 2^64/2^52) for slack per chain block below the reindex root. + return Err(ReachabilityError::DataOverflow(format!( + "unexpected behavior: reindex root {reindex_root} is out of capacity during reindexing. + Theoretically, this should only ever happen if there are more than ~2^52 blocks in the DAG." + ))); + } + + if inquirer::is_strict_chain_ancestor_of(self.store, parent, reindex_root)? { + // In this case parent is guaranteed to have sufficient interval space, + // however we avoid reindexing the entire subtree above parent + // (which includes root and thus majority of blocks mined since) + // and use slacks along the chain up forward from parent to reindex root. + // Notes: + // 1. we set `required_allocation` = subtree size of current in order to double the + // current interval capacity + // 2. it might be the case that current is the `new_child` itself + return self.reindex_intervals_earlier_than_root( + current, + reindex_root, + parent, + self.subtree_sizes[¤t], + ); + } + + current = parent + } + + self.propagate_interval(current) + } + + /// + /// Core (BFS) algorithms used during reindexing (see `count_subtrees` and `propagate_interval` below) + /// + /// + /// count_subtrees counts the size of each subtree under this block, + /// and populates self.subtree_sizes with the results. + /// It is equivalent to the following recursive implementation: + /// + /// fn count_subtrees(&mut self, block: Hash) -> Result { + /// let mut subtree_size = 0u64; + /// for child in self.store.get_children(block)?.iter().cloned() { + /// subtree_size += self.count_subtrees(child)?; + /// } + /// self.subtree_sizes.insert(block, subtree_size + 1); + /// Ok(subtree_size + 1) + /// } + /// + /// However, we are expecting (linearly) deep trees, and so a + /// recursive stack-based approach is inefficient and will hit + /// recursion limits. Instead, the same logic was implemented + /// using a (queue-based) BFS method. At a high level, the + /// algorithm uses BFS for reaching all leaves and pushes + /// intermediate updates from leaves via parent chains until all + /// size information is gathered at the root of the operation + /// (i.e. at block). + fn count_subtrees(&mut self, block: Hash) -> Result<()> { + if self.subtree_sizes.contains_key(&block) { + return Ok(()); + } + + let mut queue = VecDeque::::from([block]); + let mut counts = BlockHashMap::::new(); + + while let Some(mut current) = queue.pop_front() { + let children = self.store.get_children(current)?; + if children.is_empty() { + // We reached a leaf + self.subtree_sizes.insert(current, 1); + } else if !self.subtree_sizes.contains_key(¤t) { + // We haven't yet calculated the subtree size of + // the current block. Add all its children to the + // queue + queue.extend(children.iter()); + continue; + } + + // We reached a leaf or a pre-calculated subtree. + // Push information up + while current != block { + current = self.store.get_parent(current)?; + + let count = counts.entry(current).or_insert(0); + let children = self.store.get_children(current)?; + + *count += 1; + if *count < children.len() as u64 { + // Not all subtrees of the current block are ready + break; + } + + // All children of `current` have calculated their subtree size. + // Sum them all together and add 1 to get the sub tree size of + // `current`. 
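+                // For example, children subtree sizes [3, 1, 2] yield a subtree size
+                // of 7 for `current` (3 + 1 + 2, plus 1 for `current` itself).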
+ let subtree_sum: u64 = children.iter().map(|c| self.subtree_sizes[c]).sum(); + self.subtree_sizes.insert(current, subtree_sum + 1); + } + } + + Ok(()) + } + + /// Propagates a new interval using a BFS traversal. + /// Subtree intervals are recursively allocated according to subtree sizes and + /// the allocation rule in `Interval::split_exponential`. + fn propagate_interval(&mut self, block: Hash) -> Result<()> { + // Make sure subtrees are counted before propagating + self.count_subtrees(block)?; + + let mut queue = VecDeque::::from([block]); + while let Some(current) = queue.pop_front() { + let children = self.store.get_children(current)?; + if !children.is_empty() { + let sizes: Vec = children.iter().map(|c| self.subtree_sizes[c]).collect(); + let interval = self.store.interval_children_capacity(current)?; + let intervals = interval.split_exponential(&sizes); + for (c, ci) in children.iter().copied().zip(intervals) { + self.store.set_interval(c, ci)?; + } + queue.extend(children.iter()); + } + } + Ok(()) + } + + /// This method implements the reindex algorithm for the case where the + /// new child node is not in reindex root's subtree. The function is expected to allocate + /// `required_allocation` to be added to interval of `allocation_block`. `common_ancestor` is + /// expected to be a direct parent of `allocation_block` and an ancestor of current `reindex_root`. + fn reindex_intervals_earlier_than_root( + &mut self, + allocation_block: Hash, + reindex_root: Hash, + common_ancestor: Hash, + required_allocation: u64, + ) -> Result<()> { + // The chosen child is: (i) child of `common_ancestor`; (ii) an + // ancestor of `reindex_root` or `reindex_root` itself + let chosen_child = + get_next_chain_ancestor_unchecked(self.store, reindex_root, common_ancestor)?; + let block_interval = self.store.get_interval(allocation_block)?; + let chosen_interval = self.store.get_interval(chosen_child)?; + + if block_interval.start < chosen_interval.start { + // `allocation_block` is in the subtree before the chosen child + self.reclaim_interval_before( + allocation_block, + common_ancestor, + chosen_child, + reindex_root, + required_allocation, + ) + } else { + // `allocation_block` is in the subtree after the chosen child + self.reclaim_interval_after( + allocation_block, + common_ancestor, + chosen_child, + reindex_root, + required_allocation, + ) + } + } + + fn reclaim_interval_before( + &mut self, + allocation_block: Hash, + common_ancestor: Hash, + chosen_child: Hash, + reindex_root: Hash, + required_allocation: u64, + ) -> Result<()> { + let mut slack_sum = 0u64; + let mut path_len = 0u64; + let mut path_slack_alloc = 0u64; + + let mut current = chosen_child; + // Walk up the chain from common ancestor's chosen child towards reindex root + loop { + if current == reindex_root { + // Reached reindex root. 
In this case, since we reached (the unlimited) root, + // we also re-allocate new slack for the chain we just traversed + let offset = required_allocation + self.slack * path_len - slack_sum; + self.apply_interval_op_and_propagate(current, offset, Interval::increase_start)?; + self.offset_siblings_before(allocation_block, current, offset)?; + + // Set the slack for each chain block to be reserved below during the chain walk-down + path_slack_alloc = self.slack; + break; + } + + let slack_before_current = self.store.interval_remaining_before(current)?.size(); + slack_sum += slack_before_current; + + if slack_sum >= required_allocation { + // Set offset to be just enough to satisfy required allocation + let offset = slack_before_current - (slack_sum - required_allocation); + self.apply_interval_op(current, offset, Interval::increase_start)?; + self.offset_siblings_before(allocation_block, current, offset)?; + + break; + } + + current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; + path_len += 1; + } + + // Go back down the reachability tree towards the common ancestor. + // On every hop we reindex the reachability subtree before the + // current block with an interval that is smaller. + // This is to make room for the required allocation. + loop { + current = self.store.get_parent(current)?; + if current == common_ancestor { + break; + } + + let slack_before_current = self.store.interval_remaining_before(current)?.size(); + let offset = slack_before_current - path_slack_alloc; + self.apply_interval_op(current, offset, Interval::increase_start)?; + self.offset_siblings_before(allocation_block, current, offset)?; + } + + Ok(()) + } + + fn reclaim_interval_after( + &mut self, + allocation_block: Hash, + common_ancestor: Hash, + chosen_child: Hash, + reindex_root: Hash, + required_allocation: u64, + ) -> Result<()> { + let mut slack_sum = 0u64; + let mut path_len = 0u64; + let mut path_slack_alloc = 0u64; + + let mut current = chosen_child; + // Walk up the chain from common ancestor's chosen child towards reindex root + loop { + if current == reindex_root { + // Reached reindex root. In this case, since we reached (the unlimited) root, + // we also re-allocate new slack for the chain we just traversed + let offset = required_allocation + self.slack * path_len - slack_sum; + self.apply_interval_op_and_propagate(current, offset, Interval::decrease_end)?; + self.offset_siblings_after(allocation_block, current, offset)?; + + // Set the slack for each chain block to be reserved below during the chain walk-down + path_slack_alloc = self.slack; + break; + } + + let slack_after_current = self.store.interval_remaining_after(current)?.size(); + slack_sum += slack_after_current; + + if slack_sum >= required_allocation { + // Set offset to be just enough to satisfy required allocation + let offset = slack_after_current - (slack_sum - required_allocation); + self.apply_interval_op(current, offset, Interval::decrease_end)?; + self.offset_siblings_after(allocation_block, current, offset)?; + + break; + } + + current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; + path_len += 1; + } + + // Go back down the reachability tree towards the common ancestor. + // On every hop we reindex the reachability subtree before the + // current block with an interval that is smaller. + // This is to make room for the required allocation. 
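+        // (Here the reclaimed space comes from the siblings *after* `allocation_block`,
+        // mirroring the walk-down of `reclaim_interval_before`.)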
+ loop { + current = self.store.get_parent(current)?; + if current == common_ancestor { + break; + } + + let slack_after_current = self.store.interval_remaining_after(current)?.size(); + let offset = slack_after_current - path_slack_alloc; + self.apply_interval_op(current, offset, Interval::decrease_end)?; + self.offset_siblings_after(allocation_block, current, offset)?; + } + + Ok(()) + } + + fn offset_siblings_before( + &mut self, + allocation_block: Hash, + current: Hash, + offset: u64, + ) -> Result<()> { + let parent = self.store.get_parent(current)?; + let children = self.store.get_children(parent)?; + + let (siblings_before, _) = split_children(&children, current)?; + for sibling in siblings_before.iter().cloned().rev() { + if sibling == allocation_block { + // We reached our final destination, allocate `offset` to `allocation_block` by increasing end and break + self.apply_interval_op_and_propagate( + allocation_block, + offset, + Interval::increase_end, + )?; + break; + } + // For non-`allocation_block` siblings offset the interval upwards in order to create space + self.apply_interval_op_and_propagate(sibling, offset, Interval::increase)?; + } + + Ok(()) + } + + fn offset_siblings_after( + &mut self, + allocation_block: Hash, + current: Hash, + offset: u64, + ) -> Result<()> { + let parent = self.store.get_parent(current)?; + let children = self.store.get_children(parent)?; + + let (_, siblings_after) = split_children(&children, current)?; + for sibling in siblings_after.iter().cloned() { + if sibling == allocation_block { + // We reached our final destination, allocate `offset` to `allocation_block` by decreasing only start and break + self.apply_interval_op_and_propagate( + allocation_block, + offset, + Interval::decrease_start, + )?; + break; + } + // For siblings before `allocation_block` offset the interval downwards to create space + self.apply_interval_op_and_propagate(sibling, offset, Interval::decrease)?; + } + + Ok(()) + } + + fn apply_interval_op( + &mut self, + block: Hash, + offset: u64, + op: fn(&Interval, u64) -> Interval, + ) -> Result<()> { + self.store + .set_interval(block, op(&self.store.get_interval(block)?, offset))?; + Ok(()) + } + + fn apply_interval_op_and_propagate( + &mut self, + block: Hash, + offset: u64, + op: fn(&Interval, u64) -> Interval, + ) -> Result<()> { + self.store + .set_interval(block, op(&self.store.get_interval(block)?, offset))?; + self.propagate_interval(block)?; + Ok(()) + } + + /// A method for handling reindex operations triggered by moving the reindex root + pub(super) fn concentrate_interval( + &mut self, + parent: Hash, + child: Hash, + is_final_reindex_root: bool, + ) -> Result<()> { + let children = self.store.get_children(parent)?; + + // Split the `children` of `parent` to siblings before `child` and siblings after `child` + let (siblings_before, siblings_after) = split_children(&children, child)?; + + let siblings_before_subtrees_sum: u64 = + self.tighten_intervals_before(parent, siblings_before)?; + let siblings_after_subtrees_sum: u64 = + self.tighten_intervals_after(parent, siblings_after)?; + + self.expand_interval_to_chosen( + parent, + child, + siblings_before_subtrees_sum, + siblings_after_subtrees_sum, + is_final_reindex_root, + )?; + + Ok(()) + } + + pub(super) fn tighten_intervals_before( + &mut self, + parent: Hash, + children_before: &[Hash], + ) -> Result { + let sizes = children_before + .iter() + .cloned() + .map(|block| { + self.count_subtrees(block)?; + Ok(self.subtree_sizes[&block]) + }) + .collect::>>()?; 
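+        // `sum` is the total number of blocks in the subtrees of the children
+        // positioned before the chosen child.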
+ let sum = sizes.iter().sum(); + + let interval = self.store.get_interval(parent)?; + let interval_before = Interval::new( + interval.start + self.slack, + interval.start + self.slack + sum - 1, + ); + + for (c, ci) in children_before + .iter() + .cloned() + .zip(interval_before.split_exact(sizes.as_slice())) + { + self.store.set_interval(c, ci)?; + self.propagate_interval(c)?; + } + + Ok(sum) + } + + pub(super) fn tighten_intervals_after( + &mut self, + parent: Hash, + children_after: &[Hash], + ) -> Result { + let sizes = children_after + .iter() + .cloned() + .map(|block| { + self.count_subtrees(block)?; + Ok(self.subtree_sizes[&block]) + }) + .collect::>>()?; + let sum = sizes.iter().sum(); + + let interval = self.store.get_interval(parent)?; + let interval_after = Interval::new( + interval.end - self.slack - sum, + interval.end - self.slack - 1, + ); + + for (c, ci) in children_after + .iter() + .cloned() + .zip(interval_after.split_exact(sizes.as_slice())) + { + self.store.set_interval(c, ci)?; + self.propagate_interval(c)?; + } + + Ok(sum) + } + + pub(super) fn expand_interval_to_chosen( + &mut self, + parent: Hash, + child: Hash, + siblings_before_subtrees_sum: u64, + siblings_after_subtrees_sum: u64, + is_final_reindex_root: bool, + ) -> Result<()> { + let interval = self.store.get_interval(parent)?; + let allocation = Interval::new( + interval.start + siblings_before_subtrees_sum + self.slack, + interval.end - siblings_after_subtrees_sum - self.slack - 1, + ); + let current = self.store.get_interval(child)?; + + // Propagate interval only if the chosen `child` is the final reindex root AND + // the new interval doesn't contain the previous one + if is_final_reindex_root && !allocation.contains(current) { + /* + We deallocate slack on both sides as an optimization. Were we to + assign the fully allocated interval, the next time the reindex root moves we + would need to propagate intervals again. However when we do allocate slack, + next time this method is called (next time the reindex root moves), `allocation` is likely to contain `current`. + Note that below following the propagation we reassign the full `allocation` to `child`. + */ + let narrowed = + Interval::new(allocation.start + self.slack, allocation.end - self.slack); + self.store.set_interval(child, narrowed)?; + self.propagate_interval(child)?; + } + + self.store.set_interval(child, allocation)?; + Ok(()) + } +} + +/// Splits `children` into two slices: the blocks that are before `pivot` and the blocks that are after. 
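+/// Returns `ReachabilityError::DataInconsistency` if `pivot` is not one of `children`.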
+fn split_children(children: &std::sync::Arc>, pivot: Hash) -> Result<(&[Hash], &[Hash])> { + if let Some(index) = children.iter().cloned().position(|c| c == pivot) { + Ok((&children[..index], &children[index + 1..])) + } else { + Err(ReachabilityError::DataInconsistency) + } +} + +#[cfg(test)] +mod tests { + use super::{super::tests::*, *}; + use dag_database::consensus::{MemoryReachabilityStore, ReachabilityStoreReader}; + use starcoin_types::{blockhash, interval::Interval}; + + #[test] + fn test_count_subtrees() { + let mut store = MemoryReachabilityStore::new(); + + // Arrange + let root: Hash = 1.into(); + StoreBuilder::new(&mut store) + .add_block(root, Hash::new(blockhash::NONE)) + .add_block(2.into(), root) + .add_block(3.into(), 2.into()) + .add_block(4.into(), 2.into()) + .add_block(5.into(), 3.into()) + .add_block(6.into(), 5.into()) + .add_block(7.into(), 1.into()) + .add_block(8.into(), 6.into()); + + // Act + let mut ctx = ReindexOperationContext::new(&mut store, 10, 16); + ctx.count_subtrees(root).unwrap(); + + // Assert + let expected = [ + (1u64, 8u64), + (2, 6), + (3, 4), + (4, 1), + (5, 3), + (6, 2), + (7, 1), + (8, 1), + ] + .iter() + .cloned() + .map(|(h, c)| (Hash::from(h), c)) + .collect::>(); + + assert_eq!(expected, ctx.subtree_sizes); + + // Act + ctx.store.set_interval(root, Interval::new(1, 8)).unwrap(); + ctx.propagate_interval(root).unwrap(); + + // Assert intervals manually + let expected_intervals = [ + (1u64, (1u64, 8u64)), + (2, (1, 6)), + (3, (1, 4)), + (4, (5, 5)), + (5, (1, 3)), + (6, (1, 2)), + (7, (7, 7)), + (8, (1, 1)), + ]; + let actual_intervals = (1u64..=8) + .map(|i| (i, ctx.store.get_interval(i.into()).unwrap().into())) + .collect::>(); + assert_eq!(actual_intervals, expected_intervals); + + // Assert intervals follow the general rules + store.validate_intervals(root).unwrap(); + } +} diff --git a/consensus/dag-consensus/reachability/src/relations_service.rs b/consensus/dag-consensus/reachability/src/relations_service.rs new file mode 100644 index 0000000000..9020f307c2 --- /dev/null +++ b/consensus/dag-consensus/reachability/src/relations_service.rs @@ -0,0 +1,34 @@ +use dag_database::{consensus::RelationsStoreReader, prelude::StoreError}; +use parking_lot::RwLock; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::BlockHashes; +use std::sync::Arc; +/// Multi-threaded block-relations service imp +#[derive(Clone)] +pub struct MTRelationsService { + store: Arc>>, + level: usize, +} + +impl MTRelationsService { + pub fn new(store: Arc>>, level: u8) -> Self { + Self { + store, + level: level as usize, + } + } +} + +impl RelationsStoreReader for MTRelationsService { + fn get_parents(&self, hash: Hash) -> Result { + self.store.read()[self.level].get_parents(hash) + } + + fn get_children(&self, hash: Hash) -> Result { + self.store.read()[self.level].get_children(hash) + } + + fn has(&self, hash: Hash) -> Result { + self.store.read()[self.level].has(hash) + } +} diff --git a/consensus/dag-consensus/reachability/src/tests.rs b/consensus/dag-consensus/reachability/src/tests.rs new file mode 100644 index 0000000000..80812bee99 --- /dev/null +++ b/consensus/dag-consensus/reachability/src/tests.rs @@ -0,0 +1,267 @@ +//! +//! Test utils for reachability +//! 
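+//! Provides the `StoreBuilder`, `TreeBuilder` and `DagBuilder` fluent helpers, plus
+//! store validation extensions used by the reachability tests.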
+use super::{inquirer::*, tree::*}; +use dag_database::{ + consensus::{ReachabilityStore, ReachabilityStoreReader}, + prelude::StoreError, +}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::{ + blockhash::{BlockHashExtensions, BlockHashMap, BlockHashSet}, + interval::Interval, + perf, +}; +use std::collections::VecDeque; +use thiserror::Error; + +/// A struct with fluent API to streamline reachability store building +pub struct StoreBuilder<'a, T: ReachabilityStore + ?Sized> { + store: &'a mut T, +} + +impl<'a, T: ReachabilityStore + ?Sized> StoreBuilder<'a, T> { + pub fn new(store: &'a mut T) -> Self { + Self { store } + } + + pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self { + let parent_height = if !parent.is_none() { + self.store.append_child(parent, hash).unwrap() + } else { + 0 + }; + self.store + .insert(hash, parent, Interval::empty(), parent_height + 1) + .unwrap(); + self + } +} + +/// A struct with fluent API to streamline tree building +pub struct TreeBuilder<'a, T: ReachabilityStore + ?Sized> { + store: &'a mut T, + reindex_depth: u64, + reindex_slack: u64, +} + +impl<'a, T: ReachabilityStore + ?Sized> TreeBuilder<'a, T> { + pub fn new(store: &'a mut T) -> Self { + Self { + store, + reindex_depth: perf::DEFAULT_REINDEX_DEPTH, + reindex_slack: perf::DEFAULT_REINDEX_SLACK, + } + } + + pub fn new_with_params(store: &'a mut T, reindex_depth: u64, reindex_slack: u64) -> Self { + Self { + store, + reindex_depth, + reindex_slack, + } + } + + pub fn init(&mut self) -> &mut Self { + init(self.store).unwrap(); + self + } + + pub fn init_with_params(&mut self, origin: Hash, capacity: Interval) -> &mut Self { + init_with_params(self.store, origin, capacity).unwrap(); + self + } + + pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self { + add_tree_block( + self.store, + hash, + parent, + self.reindex_depth, + self.reindex_slack, + ) + .unwrap(); + try_advancing_reindex_root(self.store, hash, self.reindex_depth, self.reindex_slack) + .unwrap(); + self + } + + pub fn store(&self) -> &&'a mut T { + &self.store + } +} + +#[derive(Clone)] +pub struct DagBlock { + pub hash: Hash, + pub parents: Vec, +} + +impl DagBlock { + pub fn new(hash: Hash, parents: Vec) -> Self { + Self { hash, parents } + } +} + +/// A struct with fluent API to streamline DAG building +pub struct DagBuilder<'a, T: ReachabilityStore + ?Sized> { + store: &'a mut T, + map: BlockHashMap, +} + +impl<'a, T: ReachabilityStore + ?Sized> DagBuilder<'a, T> { + pub fn new(store: &'a mut T) -> Self { + Self { + store, + map: BlockHashMap::new(), + } + } + + pub fn init(&mut self) -> &mut Self { + init(self.store).unwrap(); + self + } + + pub fn add_block(&mut self, block: DagBlock) -> &mut Self { + // Select by height (longest chain) just for the sake of internal isolated tests + let selected_parent = block + .parents + .iter() + .cloned() + .max_by_key(|p| self.store.get_height(*p).unwrap()) + .unwrap(); + let mergeset = self.mergeset(&block, selected_parent); + add_block( + self.store, + block.hash, + selected_parent, + &mut mergeset.iter().cloned(), + ) + .unwrap(); + hint_virtual_selected_parent(self.store, block.hash).unwrap(); + self.map.insert(block.hash, block); + self + } + + fn mergeset(&self, block: &DagBlock, selected_parent: Hash) -> Vec { + let mut queue: VecDeque = block + .parents + .iter() + .copied() + .filter(|p| *p != selected_parent) + .collect(); + let mut mergeset: BlockHashSet = queue.iter().copied().collect(); + let mut past = BlockHashSet::new(); + + while let 
Some(current) = queue.pop_front() { + for parent in self.map[¤t].parents.iter() { + if mergeset.contains(parent) || past.contains(parent) { + continue; + } + + if is_dag_ancestor_of(self.store, *parent, selected_parent).unwrap() { + past.insert(*parent); + continue; + } + + mergeset.insert(*parent); + queue.push_back(*parent); + } + } + mergeset.into_iter().collect() + } + + pub fn store(&self) -> &&'a mut T { + &self.store + } +} + +#[derive(Error, Debug)] +pub enum TestError { + #[error("data store error")] + StoreError(#[from] StoreError), + + #[error("empty interval")] + EmptyInterval(Hash, Interval), + + #[error("sibling intervals are expected to be consecutive")] + NonConsecutiveSiblingIntervals(Interval, Interval), + + #[error("child interval out of parent bounds")] + IntervalOutOfParentBounds { + parent: Hash, + child: Hash, + parent_interval: Interval, + child_interval: Interval, + }, +} + +pub trait StoreValidationExtensions { + /// Checks if `block` is in the past of `other` (creates hashes from the u64 numbers) + fn in_past_of(&self, block: u64, other: u64) -> bool; + + /// Checks if `block` and `other` are in the anticone of each other + /// (creates hashes from the u64 numbers) + fn are_anticone(&self, block: u64, other: u64) -> bool; + + /// Validates that all tree intervals match the expected interval relations + fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError>; +} + +impl StoreValidationExtensions for T { + fn in_past_of(&self, block: u64, other: u64) -> bool { + if block == other { + return false; + } + let res = is_dag_ancestor_of(self, block.into(), other.into()).unwrap(); + if res { + // Assert that the `future` relation is indeed asymmetric + assert!(!is_dag_ancestor_of(self, other.into(), block.into()).unwrap()) + } + res + } + + fn are_anticone(&self, block: u64, other: u64) -> bool { + !is_dag_ancestor_of(self, block.into(), other.into()).unwrap() + && !is_dag_ancestor_of(self, other.into(), block.into()).unwrap() + } + + fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError> { + let mut queue = VecDeque::::from([root]); + while let Some(parent) = queue.pop_front() { + let children = self.get_children(parent)?; + queue.extend(children.iter()); + + let parent_interval = self.get_interval(parent)?; + if parent_interval.is_empty() { + return Err(TestError::EmptyInterval(parent, parent_interval)); + } + + // Verify parent-child strict relation + for child in children.iter().cloned() { + let child_interval = self.get_interval(child)?; + if !parent_interval.strictly_contains(child_interval) { + return Err(TestError::IntervalOutOfParentBounds { + parent, + child, + parent_interval, + child_interval, + }); + } + } + + // Iterate over consecutive siblings + for siblings in children.windows(2) { + let sibling_interval = self.get_interval(siblings[0])?; + let current_interval = self.get_interval(siblings[1])?; + if sibling_interval.end + 1 != current_interval.start { + return Err(TestError::NonConsecutiveSiblingIntervals( + sibling_interval, + current_interval, + )); + } + } + } + Ok(()) + } +} diff --git a/consensus/dag-consensus/reachability/src/tree.rs b/consensus/dag-consensus/reachability/src/tree.rs new file mode 100644 index 0000000000..46c7cc28db --- /dev/null +++ b/consensus/dag-consensus/reachability/src/tree.rs @@ -0,0 +1,149 @@ +//! +//! Tree-related functions internal to the module +//! 
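+//! Covers tree insertion (`add_tree_block`) and maintenance of the reindex root
+//! (`find_next_reindex_root`, `try_advancing_reindex_root`).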
+use super::{ + extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext, + *, +}; +use dag_database::consensus::ReachabilityStore; +use starcoin_crypto::HashValue as Hash; + +/// Adds `new_block` as a child of `parent` in the tree structure. If this block +/// has no remaining interval to allocate, a reindexing is triggered. When a reindexing +/// is triggered, the reindex root point is used within the reindex algorithm's logic +pub fn add_tree_block( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + parent: Hash, + reindex_depth: u64, + reindex_slack: u64, +) -> Result<()> { + // Get the remaining interval capacity + let remaining = store.interval_remaining_after(parent)?; + // Append the new child to `parent.children` + let parent_height = store.append_child(parent, new_block)?; + if remaining.is_empty() { + // Init with the empty interval. + // Note: internal logic relies on interval being this specific interval + // which comes exactly at the end of current capacity + store.insert(new_block, parent, remaining, parent_height + 1)?; + + // Start a reindex operation (TODO: add timing) + let reindex_root = store.get_reindex_root()?; + let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack); + ctx.reindex_intervals(new_block, reindex_root)?; + } else { + let allocated = remaining.split_half().0; + store.insert(new_block, parent, allocated, parent_height + 1)?; + }; + Ok(()) +} + +/// Finds the most recent tree ancestor common to both `block` and the given `reindex root`. +/// Note that we assume that almost always the chain between the reindex root and the common +/// ancestor is longer than the chain between block and the common ancestor, hence we iterate +/// from `block`. +pub fn find_common_tree_ancestor( + store: &(impl ReachabilityStore + ?Sized), + block: Hash, + reindex_root: Hash, +) -> Result { + let mut current = block; + loop { + if is_chain_ancestor_of(store, current, reindex_root)? { + return Ok(current); + } + current = store.get_parent(current)?; + } +} + +/// Finds a possible new reindex root, based on the `current` reindex root and the selected tip `hint` +pub fn find_next_reindex_root( + store: &(impl ReachabilityStore + ?Sized), + current: Hash, + hint: Hash, + reindex_depth: u64, + reindex_slack: u64, +) -> Result<(Hash, Hash)> { + let mut ancestor = current; + let mut next = current; + + let hint_height = store.get_height(hint)?; + + // Test if current root is ancestor of selected tip (`hint`) - if not, this is a reorg case + if !is_chain_ancestor_of(store, current, hint)? { + let current_height = store.get_height(current)?; + + // We have reindex root out of (hint) selected tip chain, however we switch chains only after a sufficient + // threshold of `reindex_slack` diff in order to address possible alternating reorg attacks. + // The `reindex_slack` constant is used as an heuristic large enough on the one hand, but + // one which will not harm performance on the other hand - given the available slack at the chain split point. + // + // Note: In some cases the height of the (hint) selected tip can be lower than the current reindex root height. + // If that's the case we keep the reindex root unchanged. 
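+        // The first clause below also guards the unsigned subtraction in the second
+        // clause from underflowing.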
+ if hint_height < current_height || hint_height - current_height < reindex_slack { + return Ok((current, current)); + } + + let common = find_common_tree_ancestor(store, hint, current)?; + ancestor = common; + next = common; + } + + // Iterate from ancestor towards the selected tip (`hint`) until passing the + // `reindex_window` threshold, for finding the new reindex root + loop { + let child = get_next_chain_ancestor_unchecked(store, hint, next)?; + let child_height = store.get_height(child)?; + + if hint_height < child_height { + return Err(ReachabilityError::DataInconsistency); + } + if hint_height - child_height < reindex_depth { + break; + } + next = child; + } + + Ok((ancestor, next)) +} + +/// Attempts to advance or move the current reindex root according to the +/// provided `virtual selected parent` (`VSP`) hint. +/// It is important for the reindex root point to follow the consensus-agreed chain +/// since this way it can benefit from chain-robustness which is implied by the security +/// of the ordering protocol. That is, it enjoys from the fact that all future blocks are +/// expected to elect the root subtree (by converging to the agreement to have it on the +/// selected chain). See also the reachability algorithms overview (TODO) +pub fn try_advancing_reindex_root( + store: &mut (impl ReachabilityStore + ?Sized), + hint: Hash, + reindex_depth: u64, + reindex_slack: u64, +) -> Result<()> { + // Get current root from the store + let current = store.get_reindex_root()?; + + // Find the possible new root + let (mut ancestor, next) = + find_next_reindex_root(store, current, hint, reindex_depth, reindex_slack)?; + + // No update to root, return + if current == next { + return Ok(()); + } + + // if ancestor == next { + // trace!("next reindex root is an ancestor of current one, skipping concentration.") + // } + while ancestor != next { + let child = get_next_chain_ancestor_unchecked(store, next, ancestor)?; + let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack); + ctx.concentrate_interval(ancestor, child, child == next)?; + ancestor = child; + } + + // Update reindex root in the data store + store.set_reindex_root(next)?; + Ok(()) +} diff --git a/consensus/dag-consensus/src/blockdag.rs b/consensus/dag-consensus/src/blockdag.rs new file mode 100644 index 0000000000..8d8b95920c --- /dev/null +++ b/consensus/dag-consensus/src/blockdag.rs @@ -0,0 +1,235 @@ +use anyhow::bail; +use dag_database::consensus::{ + DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, + HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, +}; +use dag_database::prelude::FlexiDagStorage; +use ghostdag::protocol::GhostdagManager; +use parking_lot::RwLock; +use reachability::{inquirer, reachability_service::MTReachabilityService}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::{ + blockhash::{BlockHashes, KType, ORIGIN}, + header::{ConsensusHeader, Header}, +}; +use std::collections::HashMap; +use std::collections::HashSet; +use std::sync::Arc; + +pub type DbGhostdagManager = GhostdagManager< + DbGhostdagStore, + DbRelationsStore, + MTReachabilityService, + DbHeadersStore, +>; +pub struct BlockDAG { + genesis: Header, + ghostdag_manager: DbGhostdagManager, + relations_store: DbRelationsStore, + reachability_store: DbReachabilityStore, + ghostdag_store: DbGhostdagStore, + header_store: DbHeadersStore, + /// orphan blocks, parent hash -> orphan block + missing_blocks: HashMap>, +} + +impl BlockDAG { + pub fn 
new(genesis: Header, k: KType, db: FlexiDagStorage) -> Self { + let ghostdag_store = db.ghost_dag_store.clone(); + let header_store = db.header_store.clone(); + let relations_store = db.relations_store.clone(); + let mut reachability_store = db.reachability_store; + inquirer::init(&mut reachability_store).unwrap(); + let reachability_service = + MTReachabilityService::new(Arc::new(RwLock::new(reachability_store.clone()))); + let ghostdag_manager = DbGhostdagManager::new( + genesis.hash(), + k, + ghostdag_store.clone(), + relations_store.clone(), + header_store.clone(), + reachability_service, + ); + + let mut dag = Self { + genesis, + ghostdag_manager, + relations_store, + reachability_store, + ghostdag_store, + header_store, + missing_blocks: HashMap::new(), + }; + dag.init_with_genesis(); + dag + } + + pub fn init_with_genesis(&mut self) { + if self.relations_store.has(Hash::new(ORIGIN)).unwrap() { + return; + } + self.relations_store + .insert(Hash::new(ORIGIN), BlockHashes::new(vec![])) + .unwrap(); + self.commit_header(&self.genesis.clone()) + } + + pub fn commit_header(&mut self, header: &Header) { + // Generate ghostdag data + + let parents_hash = header.parents_hash(); + let ghostdag_data = if header.hash() != self.genesis.hash() { + self.ghostdag_manager.ghostdag(parents_hash) + } else { + self.ghostdag_manager.genesis_ghostdag_data() + }; + // Store ghostdata + self.ghostdag_store + .insert(header.hash(), Arc::new(ghostdag_data.clone())) + .unwrap(); + + // Update reachability store + let mut reachability_store = self.reachability_store.clone(); + let mut merge_set = ghostdag_data + .unordered_mergeset_without_selected_parent() + .filter(|hash| self.reachability_store.has(*hash).unwrap()); + + inquirer::add_block( + &mut reachability_store, + header.hash(), + ghostdag_data.selected_parent, + &mut merge_set, + ) + .unwrap(); + + // store relations + self.relations_store + .insert(header.hash(), BlockHashes::new(parents_hash.to_vec())) + .unwrap(); + // Store header store + self.header_store + .insert(header.hash(), Arc::new(header.to_owned()), 0) + .unwrap(); + } + fn is_in_dag(&self, _hash: Hash) -> anyhow::Result { + return Ok(true); + } + pub fn verify_header(&self, _header: &Header) -> anyhow::Result<()> { + //TODO: implemented it + Ok(()) + } + + pub fn connect_block(&mut self, header: &Header) -> anyhow::Result<()> { + let _ = self.verify_header(header)?; + let is_orphan_block = self.update_orphans(header)?; + if is_orphan_block { + return Ok(()); + } + self.commit_header(header); + self.check_missing_block(header)?; + Ok(()) + } + + pub fn check_missing_block(&mut self, header: &Header) -> anyhow::Result<()> { + if let Some(orphans) = self.missing_blocks.remove(&header.hash()) { + for orphan in orphans.iter() { + let is_orphan = self.is_orphan(&orphan)?; + if !is_orphan { + self.commit_header(header); + } + } + } + Ok(()) + } + fn is_orphan(&self, header: &Header) -> anyhow::Result { + for parent in header.parents_hash() { + if !self.is_in_dag(parent.to_owned())? { + return Ok(false); + } + } + return Ok(true); + } + + fn update_orphans(&mut self, block_header: &Header) -> anyhow::Result { + let mut is_orphan = false; + for parent in block_header.parents_hash() { + if self.is_in_dag(parent.to_owned())? 
{ + continue; + } + if !self + .missing_blocks + .entry(parent.to_owned()) + .or_insert_with(HashSet::new) + .insert(block_header.to_owned()) + { + return Err(anyhow::anyhow!("Block already processed as an orphan")); + } + is_orphan = true; + } + Ok(is_orphan) + } + + pub fn get_block_header(&self, hash: Hash) -> anyhow::Result
{ + match self.header_store.get_header(hash) { + Ok(header) => anyhow::Result::Ok(header), + Err(error) => { + println!("failed to get header by hash: {}", error.to_string()); + bail!("failed to get header by hash: {}", error.to_string()); + } + } + } + + pub fn get_parents(&self, hash: Hash) -> anyhow::Result> { + match self.relations_store.get_parents(hash) { + Ok(parents) => anyhow::Result::Ok((*parents).clone()), + Err(error) => { + println!("failed to get parents by hash: {}", error.to_string()); + bail!("failed to get parents by hash: {}", error.to_string()); + } + } + } + + pub fn get_children(&self, hash: Hash) -> anyhow::Result> { + match self.relations_store.get_children(hash) { + Ok(children) => anyhow::Result::Ok((*children).clone()), + Err(error) => { + println!("failed to get parents by hash: {}", error.to_string()); + bail!("failed to get parents by hash: {}", error.to_string()); + } + } + } + + pub fn get_genesis_hash(&self) -> Hash { + self.genesis.hash() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use dag_database::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; + use starcoin_types::block::BlockHeader; + use std::{env, fs}; + #[test] + fn base_test() { + let genesis = Header::new(BlockHeader::random(), vec![Hash::new(ORIGIN)]); + let genesis_hash = genesis.hash(); + let k = 16; + let db_path = env::temp_dir().join("smolstc"); + println!("db path:{}", db_path.to_string_lossy()); + if db_path + .as_path() + .try_exists() + .unwrap_or_else(|_| panic!("Failed to check {db_path:?}")) + { + fs::remove_dir_all(db_path.as_path()).expect("Failed to delete temporary directory"); + } + let config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); + let db = FlexiDagStorage::create_from_path(db_path, config) + .expect("Failed to create flexidag storage"); + let mut dag = BlockDAG::new(genesis, k, db); + + let block = Header::new(BlockHeader::random(), vec![genesis_hash]); + dag.commit_header(&block); + } +} diff --git a/consensus/dag-consensus/src/lib.rs b/consensus/dag-consensus/src/lib.rs new file mode 100644 index 0000000000..89210c01fe --- /dev/null +++ b/consensus/dag-consensus/src/lib.rs @@ -0,0 +1 @@ +pub mod blockdag; diff --git a/network-rpc/api/Cargo.toml b/network-rpc/api/Cargo.toml index 2fe399c8a0..d49fa1e612 100644 --- a/network-rpc/api/Cargo.toml +++ b/network-rpc/api/Cargo.toml @@ -15,7 +15,6 @@ starcoin-state-api = { workspace = true } starcoin-state-tree = { workspace = true } starcoin-types = { workspace = true } starcoin-vm-types = { workspace = true } -consensus-types = { workspace = true } [package] authors = { workspace = true } diff --git a/network-rpc/api/src/dag_protocol.rs b/network-rpc/api/src/dag_protocol.rs index 792ae6b477..fb689fadf5 100644 --- a/network-rpc/api/src/dag_protocol.rs +++ b/network-rpc/api/src/dag_protocol.rs @@ -1,7 +1,7 @@ -use consensus_types::header::Header; use serde::{Deserialize, Serialize}; use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_crypto::HashValue; +use starcoin_types::header::Header; #[derive(Clone, Debug, Hash, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize)] pub struct RelationshipPair { diff --git a/network-rpc/src/rpc.rs b/network-rpc/src/rpc.rs index 7aca558e92..d28ea6ed38 100644 --- a/network-rpc/src/rpc.rs +++ b/network-rpc/src/rpc.rs @@ -10,10 +10,10 @@ use starcoin_accumulator::AccumulatorNode; use starcoin_chain_service::{ChainAsyncService, ChainReaderService}; use starcoin_crypto::HashValue; use starcoin_network_rpc_api::{ - gen_server, BlockBody, 
GetAccountState, GetAccumulatorNodeByNodeHash, GetBlockHeadersByNumber, - GetBlockIds, GetStateWithProof, GetStateWithTableItemProof, GetTxnsWithHash, GetTxnsWithSize, - Ping, RpcRequest, MAX_BLOCK_HEADER_REQUEST_SIZE, MAX_BLOCK_INFO_REQUEST_SIZE, - MAX_BLOCK_REQUEST_SIZE, MAX_TXN_REQUEST_SIZE, dag_protocol, + dag_protocol, gen_server, BlockBody, GetAccountState, GetAccumulatorNodeByNodeHash, + GetBlockHeadersByNumber, GetBlockIds, GetStateWithProof, GetStateWithTableItemProof, + GetTxnsWithHash, GetTxnsWithSize, Ping, RpcRequest, MAX_BLOCK_HEADER_REQUEST_SIZE, + MAX_BLOCK_INFO_REQUEST_SIZE, MAX_BLOCK_REQUEST_SIZE, MAX_TXN_REQUEST_SIZE, }; use starcoin_service_registry::ServiceRef; use starcoin_state_api::{ChainStateAsyncService, StateWithProof, StateWithTableItemProof}; @@ -309,24 +309,24 @@ impl gen_server::NetworkRpc for NetworkRpcImpl { fn get_dag_accumulator_leaves( &self, - peer_id: PeerId, - req: dag_protocol::GetDagAccumulatorLeaves, + _peer_id: PeerId, + _req: dag_protocol::GetDagAccumulatorLeaves, ) -> BoxFuture>> { todo!() } fn get_accumulator_leaf_detail( &self, - peer_id: PeerId, - req: dag_protocol::GetTargetDagAccumulatorLeafDetail, + _peer_id: PeerId, + _req: dag_protocol::GetTargetDagAccumulatorLeafDetail, ) -> BoxFuture>>> { todo!() } fn get_dag_block_info( &self, - peer_id: PeerId, - req: dag_protocol::GetSyncDagBlockInfo, + _peer_id: PeerId, + _req: dag_protocol::GetSyncDagBlockInfo, ) -> BoxFuture>>> { todo!() } diff --git a/storage/dag-database/Cargo.toml b/storage/dag-database/Cargo.toml new file mode 100644 index 0000000000..dbc41e5a69 --- /dev/null +++ b/storage/dag-database/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "dag-database" +version = "1.13.5" +edition.workspace = true + +[dependencies] +starcoin-storage.workspace = true +starcoin-config.workspace = true +starcoin-crypto.workspace = true +starcoin-types.workspace = true +rocksdb.workspace = true +serde.workspace = true +bincode.workspace = true +indexmap.workspace = true +parking_lot.workspace = true +thiserror.workspace = true +rand.workspace = true +faster-hex.workspace = true +itertools.workspace = true +num_cpus.workspace = true + +[dev-dependencies] +tempfile.workspace = true diff --git a/storage/dag-database/src/access.rs b/storage/dag-database/src/access.rs new file mode 100644 index 0000000000..3e074cb190 --- /dev/null +++ b/storage/dag-database/src/access.rs @@ -0,0 +1,241 @@ +use crate::{cache::DagCache, db::DBStorage, errors::StoreError}; + +use super::prelude::{Cache, DbWriter}; +use itertools::Itertools; +use rocksdb::{Direction, IteratorMode, ReadOptions}; +use serde::{de::DeserializeOwned, Serialize}; +use starcoin_storage::storage::RawDBStorage; +use std::{ + collections::hash_map::RandomState, error::Error, hash::BuildHasher, marker::PhantomData, + sync::Arc, +}; + +/// A concurrent DB store access with typed caching. 
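// Editorial usage sketch (assumed, not part of the original patch; the column-family
// name, cache size and key/value types are illustrative and must satisfy the bounds
// on the impl below):
//
//     let access: CachedDbAccess<Hash, u64> = CachedDbAccess::new(db.clone(), 512, "example-cf");
//     access.write(DirectDbWriter::new(&db), some_hash, 42u64)?;  // serialize, cache and persist
//     assert_eq!(access.read(some_hash)?, 42);                    // later reads are served from the cache
//     access.delete(DirectDbWriter::new(&db), some_hash)?;        // evict from cache and column family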
+#[derive(Clone)] +pub struct CachedDbAccess +where + TKey: Clone + std::hash::Hash + Eq + Send + Sync + AsRef<[u8]>, + TData: Clone + Send + Sync + DeserializeOwned, +{ + db: Arc, + + // Cache + cache: Cache, + + // DB bucket/path + prefix: &'static str, + + _phantom: PhantomData<(TData, S)>, +} + +impl CachedDbAccess +where + TKey: Clone + std::hash::Hash + Eq + Send + Sync + AsRef<[u8]>, + TData: Clone + Send + Sync + DeserializeOwned, + S: BuildHasher + Default, +{ + pub fn new(db: Arc, cache_size: u64, prefix: &'static str) -> Self { + Self { + db, + cache: Cache::new_with_capacity(cache_size), + prefix, + _phantom: Default::default(), + } + } + + pub fn read_from_cache(&self, key: TKey) -> Result, StoreError> + where + TKey: Copy + AsRef<[u8]>, + { + self.cache + .get(&key) + .map(|b| bincode::deserialize(&b).map_err(StoreError::DeserializationError)) + .transpose() + } + + pub fn has(&self, key: TKey) -> Result + where + TKey: Clone + AsRef<[u8]>, + { + Ok(self.cache.contains_key(&key) + || self + .db + .raw_get_pinned_cf(self.prefix, key) + .map_err(|_| StoreError::CFNotExist(self.prefix.to_string()))? + .is_some()) + } + + pub fn read(&self, key: TKey) -> Result + where + TKey: Clone + AsRef<[u8]> + ToString, + TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned_cf` has short lifetime + { + if let Some(data) = self.cache.get(&key) { + let data = bincode::deserialize(&data)?; + Ok(data) + } else if let Some(slice) = self + .db + .raw_get_pinned_cf(self.prefix, &key) + .map_err(|_| StoreError::CFNotExist(self.prefix.to_string()))? + { + let data: TData = bincode::deserialize(&slice)?; + self.cache.insert(key, slice.to_vec()); + Ok(data) + } else { + Err(StoreError::KeyNotFound(key.to_string())) + } + } + + pub fn iterator( + &self, + ) -> Result, TData), Box>> + '_, StoreError> + where + TKey: Clone + AsRef<[u8]>, + TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned_cf` has short lifetime + { + let db_iterator = self + .db + .raw_iterator_cf_opt(self.prefix, IteratorMode::Start, ReadOptions::default()) + .map_err(|e| StoreError::CFNotExist(e.to_string()))?; + + Ok(db_iterator.map(|iter_result| match iter_result { + Ok((key, data_bytes)) => match bincode::deserialize(&data_bytes) { + Ok(data) => Ok((key, data)), + Err(e) => Err(e.into()), + }, + Err(e) => Err(e.into()), + })) + } + + pub fn write(&self, mut writer: impl DbWriter, key: TKey, data: TData) -> Result<(), StoreError> + where + TKey: Clone + AsRef<[u8]>, + TData: Serialize, + { + let bin_data = bincode::serialize(&data)?; + self.cache.insert(key.clone(), bin_data.clone()); + writer.put(self.prefix, key.as_ref(), bin_data)?; + Ok(()) + } + + pub fn write_many( + &self, + mut writer: impl DbWriter, + iter: &mut (impl Iterator + Clone), + ) -> Result<(), StoreError> + where + TKey: Clone + AsRef<[u8]>, + TData: Serialize, + { + for (key, data) in iter { + let bin_data = bincode::serialize(&data)?; + self.cache.insert(key.clone(), bin_data.clone()); + writer.put(self.prefix, key.as_ref(), bin_data)?; + } + Ok(()) + } + + /// Write directly from an iterator and do not cache any data. 
NOTE: this action also clears the cache + pub fn write_many_without_cache( + &self, + mut writer: impl DbWriter, + iter: &mut impl Iterator, + ) -> Result<(), StoreError> + where + TKey: Clone + AsRef<[u8]>, + TData: Serialize, + { + for (key, data) in iter { + let bin_data = bincode::serialize(&data)?; + writer.put(self.prefix, key.as_ref(), bin_data)?; + } + // The cache must be cleared in order to avoid invalidated entries + self.cache.remove_all(); + Ok(()) + } + + pub fn delete(&self, mut writer: impl DbWriter, key: TKey) -> Result<(), StoreError> + where + TKey: Clone + AsRef<[u8]>, + { + self.cache.remove(&key); + writer.delete(self.prefix, key.as_ref())?; + Ok(()) + } + + pub fn delete_many( + &self, + mut writer: impl DbWriter, + key_iter: &mut (impl Iterator + Clone), + ) -> Result<(), StoreError> + where + TKey: Clone + AsRef<[u8]>, + { + let key_iter_clone = key_iter.clone(); + self.cache.remove_many(key_iter); + for key in key_iter_clone { + writer.delete(self.prefix, key.as_ref())?; + } + Ok(()) + } + + pub fn delete_all(&self, mut writer: impl DbWriter) -> Result<(), StoreError> + where + TKey: Clone + AsRef<[u8]>, + { + self.cache.remove_all(); + let keys = self + .db + .raw_iterator_cf_opt(self.prefix, IteratorMode::Start, ReadOptions::default()) + .map_err(|e| StoreError::CFNotExist(e.to_string()))? + .map(|iter_result| match iter_result { + Ok((key, _)) => Ok::<_, rocksdb::Error>(key), + Err(e) => Err(e), + }) + .collect_vec(); + for key in keys { + writer.delete(self.prefix, key?.as_ref())?; + } + Ok(()) + } + + /// A dynamic iterator that can iterate through a specific prefix, and from a certain start point. + //TODO: loop and chain iterators for multi-prefix iterator. + pub fn seek_iterator( + &self, + seek_from: Option, // iter whole range if None + limit: usize, // amount to take. + skip_first: bool, // skips the first value, (useful in conjunction with the seek-key, as to not re-retrieve). 
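        // Editorial note (assumed usage): for paging, pass the last key returned by the
        // previous call as `seek_from` together with `skip_first = true`.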
+ ) -> Result, TData), Box>> + '_, StoreError> + where + TKey: Clone + AsRef<[u8]>, + TData: DeserializeOwned, + { + let read_opts = ReadOptions::default(); + let mut db_iterator = match seek_from { + Some(seek_key) => self.db.raw_iterator_cf_opt( + self.prefix, + IteratorMode::From(seek_key.as_ref(), Direction::Forward), + read_opts, + ), + None => self + .db + .raw_iterator_cf_opt(self.prefix, IteratorMode::Start, read_opts), + } + .map_err(|e| StoreError::CFNotExist(e.to_string()))?; + + if skip_first { + db_iterator.next(); + } + + Ok(db_iterator.take(limit).map(move |item| match item { + Ok((key_bytes, value_bytes)) => { + match bincode::deserialize::(value_bytes.as_ref()) { + Ok(value) => Ok((key_bytes, value)), + Err(err) => Err(err.into()), + } + } + Err(err) => Err(err.into()), + })) + } +} diff --git a/storage/dag-database/src/cache/mod.rs b/storage/dag-database/src/cache/mod.rs new file mode 100644 index 0000000000..ca7bcaa830 --- /dev/null +++ b/storage/dag-database/src/cache/mod.rs @@ -0,0 +1,15 @@ +mod stc_cache; +pub use stc_cache::*; + +pub trait DagCache { + type TKey: Clone + std::hash::Hash + Eq + Send + Sync + AsRef<[u8]>; + type TData: Clone + Send + Sync + AsRef<[u8]>; + + fn new_with_capacity(size: u64) -> Self; + fn get(&self, key: &Self::TKey) -> Option; + fn contains_key(&self, key: &Self::TKey) -> bool; + fn insert(&self, key: Self::TKey, data: Self::TData); + fn remove(&self, key: &Self::TKey); + fn remove_many(&self, key_iter: &mut impl Iterator); + fn remove_all(&self); +} diff --git a/storage/dag-database/src/cache/stc_cache.rs b/storage/dag-database/src/cache/stc_cache.rs new file mode 100644 index 0000000000..45b99dd550 --- /dev/null +++ b/storage/dag-database/src/cache/stc_cache.rs @@ -0,0 +1,45 @@ +use super::DagCache; +use starcoin_storage::cache_storage::CacheStorage; +use std::{marker::PhantomData, sync::Arc}; + +#[derive(Clone)] +pub struct Cache { + cache: Arc, + _phantom: PhantomData, +} + +impl> DagCache for Cache { + type TKey = TKey; + type TData = Vec; + + fn new_with_capacity(size: u64) -> Self { + Self { + cache: Arc::new(CacheStorage::new_with_capacity(size as usize, None)), + _phantom: Default::default(), + } + } + + fn get(&self, key: &Self::TKey) -> Option { + self.cache.get_inner(None, key.as_ref().to_vec()) + } + + fn contains_key(&self, key: &Self::TKey) -> bool { + self.get(key).is_some() + } + + fn insert(&self, key: Self::TKey, data: Self::TData) { + self.cache.put_inner(None, key.as_ref().to_vec(), data); + } + + fn remove(&self, key: &Self::TKey) { + self.cache.remove_inner(None, key.as_ref().to_vec()); + } + + fn remove_many(&self, key_iter: &mut impl Iterator) { + key_iter.for_each(|k| self.remove(&k)); + } + + fn remove_all(&self) { + self.cache.remove_all(); + } +} diff --git a/storage/dag-database/src/consensus_ghostdag.rs b/storage/dag-database/src/consensus_ghostdag.rs new file mode 100644 index 0000000000..63d6b20dbd --- /dev/null +++ b/storage/dag-database/src/consensus_ghostdag.rs @@ -0,0 +1,461 @@ +use crate::{ + db::DBStorage, + errors::StoreError, + prelude::{CachedDbAccess, DirectDbWriter}, + writer::BatchDbWriter, +}; +use itertools::{ + EitherOrBoth::{Both, Left, Right}, + Itertools, +}; +use rocksdb::WriteBatch; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::{ + blockhash::{BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap}, + ghostdata::{CompactGhostdagData, GhostdagData}, + ordering::SortableBlock, +}; +use std::{cell::RefCell, cmp, iter::once, sync::Arc}; + +pub trait 
GhostdagStoreReader { + fn get_blue_score(&self, hash: Hash) -> Result; + fn get_blue_work(&self, hash: Hash) -> Result; + fn get_selected_parent(&self, hash: Hash) -> Result; + fn get_mergeset_blues(&self, hash: Hash) -> Result; + fn get_mergeset_reds(&self, hash: Hash) -> Result; + fn get_blues_anticone_sizes(&self, hash: Hash) -> Result; + + /// Returns full block data for the requested hash + fn get_data(&self, hash: Hash) -> Result, StoreError>; + + fn get_compact_data(&self, hash: Hash) -> Result; + + /// Check if the store contains data for the requested hash + fn has(&self, hash: Hash) -> Result; +} + +pub trait GhostdagStore: GhostdagStoreReader { + /// Insert GHOSTDAG data for block `hash` into the store. Note that GHOSTDAG data + /// is added once and never modified, so no need for specific setters for each element. + /// Additionally, this means writes are semantically "append-only", which is why + /// we can keep the `insert` method non-mutable on self. See "Parallel Processing.md" for an overview. + fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError>; +} + +pub struct GhostDagDataWrapper(GhostdagData); + +impl From for GhostDagDataWrapper { + fn from(value: GhostdagData) -> Self { + Self(value) + } +} + +impl GhostDagDataWrapper { + /// Returns an iterator to the mergeset in ascending blue work order (tie-breaking by hash) + pub fn ascending_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.0 + .mergeset_blues + .iter() + .skip(1) // Skip the selected parent + .cloned() + .map(|h| { + store + .get_blue_work(h) + .map(|blue| SortableBlock::new(h, blue)) + }) + .merge_join_by( + self.0 + .mergeset_reds + .iter() + .cloned() + .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), + |a, b| match (a, b) { + (Ok(a), Ok(b)) => a.cmp(b), + (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node + (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node + (Err(_), Err(_)) => cmp::Ordering::Equal, // remove both Err nodes + }, + ) + .map(|r| match r { + Left(b) | Right(b) => b, + Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), + }) + } + + /// Returns an iterator to the mergeset in descending blue work order (tie-breaking by hash) + pub fn descending_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.0 + .mergeset_blues + .iter() + .skip(1) // Skip the selected parent + .rev() // Reverse since blues and reds are stored with ascending blue work order + .cloned() + .map(|h| { + store + .get_blue_work(h) + .map(|blue| SortableBlock::new(h, blue)) + }) + .merge_join_by( + self.0 + .mergeset_reds + .iter() + .rev() // Reverse + .cloned() + .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), + |a, b| match (b, a) { + (Ok(b), Ok(a)) => b.cmp(a), + (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node + (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node + (Err(_), Err(_)) => cmp::Ordering::Equal, // select both Err nodes + }, // Reverse + ) + .map(|r| match r { + Left(b) | Right(b) => b, + Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), + }) + } + + /// Returns an iterator to the mergeset in topological consensus order -- starting with the selected parent, + /// and adding the mergeset in increasing blue work order. 
Note that this is a topological order even though + /// the selected parent has highest blue work by def -- since the mergeset is in its anticone. + pub fn consensus_ordered_mergeset<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + once(Ok(self.0.selected_parent)).chain( + self.ascending_mergeset_without_selected_parent(store) + .map(|s| s.map(|s| s.hash)), + ) + } + + /// Returns an iterator to the mergeset in topological consensus order without the selected parent + pub fn consensus_ordered_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.ascending_mergeset_without_selected_parent(store) + .map(|s| s.map(|s| s.hash)) + } +} + +pub(crate) const GHOST_DAG_STORE_CF: &str = "block-ghostdag-data"; +pub(crate) const COMPACT_GHOST_DAG_STORE_CF: &str = "compact-block-ghostdag-data"; + +/// A DB + cache implementation of `GhostdagStore` trait, with concurrency support. +#[derive(Clone)] +pub struct DbGhostdagStore { + db: Arc, + level: BlockLevel, + access: CachedDbAccess>, + compact_access: CachedDbAccess, +} + +impl DbGhostdagStore { + pub fn new(db: Arc, level: BlockLevel, cache_size: u64) -> Self { + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_size, GHOST_DAG_STORE_CF), + compact_access: CachedDbAccess::new(db, cache_size, COMPACT_GHOST_DAG_STORE_CF), + } + } + + pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { + Self::new(Arc::clone(&self.db), self.level, cache_size) + } + + pub fn insert_batch( + &self, + batch: &mut WriteBatch, + hash: Hash, + data: &Arc, + ) -> Result<(), StoreError> { + if self.access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.access + .write(BatchDbWriter::new(batch), hash, data.clone())?; + self.compact_access.write( + BatchDbWriter::new(batch), + hash, + CompactGhostdagData { + blue_score: data.blue_score, + blue_work: data.blue_work, + selected_parent: data.selected_parent, + }, + )?; + Ok(()) + } +} + +impl GhostdagStoreReader for DbGhostdagStore { + fn get_blue_score(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.blue_score) + } + + fn get_blue_work(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.blue_work) + } + + fn get_selected_parent(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.selected_parent) + } + + fn get_mergeset_blues(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.mergeset_blues)) + } + + fn get_mergeset_reds(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.mergeset_reds)) + } + + fn get_blues_anticone_sizes(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.blues_anticone_sizes)) + } + + fn get_data(&self, hash: Hash) -> Result, StoreError> { + self.access.read(hash) + } + + fn get_compact_data(&self, hash: Hash) -> Result { + self.compact_access.read(hash) + } + + fn has(&self, hash: Hash) -> Result { + self.access.has(hash) + } +} + +impl GhostdagStore for DbGhostdagStore { + fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError> { + if self.access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.access + .write(DirectDbWriter::new(&self.db), hash, data.clone())?; + if self.compact_access.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.compact_access.write( + DirectDbWriter::new(&self.db), + hash, + CompactGhostdagData { + blue_score: data.blue_score, + blue_work: data.blue_work, + selected_parent: data.selected_parent, + }, + )?; + Ok(()) + } +} + +/// An in-memory implementation of `GhostdagStore` trait to be used for tests. +/// Uses `RefCell` for interior mutability in order to workaround `insert` +/// being non-mutable. +pub struct MemoryGhostdagStore { + blue_score_map: RefCell>, + blue_work_map: RefCell>, + selected_parent_map: RefCell>, + mergeset_blues_map: RefCell>, + mergeset_reds_map: RefCell>, + blues_anticone_sizes_map: RefCell>, +} + +impl MemoryGhostdagStore { + pub fn new() -> Self { + Self { + blue_score_map: RefCell::new(BlockHashMap::new()), + blue_work_map: RefCell::new(BlockHashMap::new()), + selected_parent_map: RefCell::new(BlockHashMap::new()), + mergeset_blues_map: RefCell::new(BlockHashMap::new()), + mergeset_reds_map: RefCell::new(BlockHashMap::new()), + blues_anticone_sizes_map: RefCell::new(BlockHashMap::new()), + } + } +} + +impl Default for MemoryGhostdagStore { + fn default() -> Self { + Self::new() + } +} + +impl GhostdagStore for MemoryGhostdagStore { + fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError> { + if self.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.blue_score_map + .borrow_mut() + .insert(hash, data.blue_score); + self.blue_work_map.borrow_mut().insert(hash, data.blue_work); + self.selected_parent_map + .borrow_mut() + .insert(hash, data.selected_parent); + self.mergeset_blues_map + .borrow_mut() + .insert(hash, data.mergeset_blues.clone()); + self.mergeset_reds_map + .borrow_mut() + .insert(hash, data.mergeset_reds.clone()); + self.blues_anticone_sizes_map + .borrow_mut() + .insert(hash, data.blues_anticone_sizes.clone()); + Ok(()) + } +} + +impl GhostdagStoreReader for MemoryGhostdagStore { + fn get_blue_score(&self, hash: Hash) -> Result { + match self.blue_score_map.borrow().get(&hash) { + Some(blue_score) => Ok(*blue_score), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_blue_work(&self, hash: Hash) -> Result { + match self.blue_work_map.borrow().get(&hash) { + Some(blue_work) => Ok(*blue_work), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_selected_parent(&self, hash: Hash) -> Result { + match self.selected_parent_map.borrow().get(&hash) { + Some(selected_parent) => Ok(*selected_parent), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_mergeset_blues(&self, hash: Hash) -> Result { + match self.mergeset_blues_map.borrow().get(&hash) { + Some(mergeset_blues) => Ok(BlockHashes::clone(mergeset_blues)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_mergeset_reds(&self, hash: Hash) -> Result { + match self.mergeset_reds_map.borrow().get(&hash) { + Some(mergeset_reds) => Ok(BlockHashes::clone(mergeset_reds)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_blues_anticone_sizes(&self, hash: Hash) -> Result { + match self.blues_anticone_sizes_map.borrow().get(&hash) { + Some(sizes) => Ok(HashKTypeMap::clone(sizes)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_data(&self, hash: Hash) -> Result, StoreError> { + if !self.has(hash)? 
{ + return Err(StoreError::KeyNotFound(hash.to_string())); + } + Ok(Arc::new(GhostdagData::new( + self.blue_score_map.borrow()[&hash], + self.blue_work_map.borrow()[&hash], + self.selected_parent_map.borrow()[&hash], + self.mergeset_blues_map.borrow()[&hash].clone(), + self.mergeset_reds_map.borrow()[&hash].clone(), + self.blues_anticone_sizes_map.borrow()[&hash].clone(), + ))) + } + + fn get_compact_data(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.to_compact()) + } + + fn has(&self, hash: Hash) -> Result { + Ok(self.blue_score_map.borrow().contains_key(&hash)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use starcoin_types::blockhash::BlockHashSet; + use std::iter::once; + + #[test] + fn test_mergeset_iterators() { + let store = MemoryGhostdagStore::new(); + + let factory = |w: u64| { + Arc::new(GhostdagData { + blue_score: Default::default(), + blue_work: w.into(), + selected_parent: Default::default(), + mergeset_blues: Default::default(), + mergeset_reds: Default::default(), + blues_anticone_sizes: Default::default(), + }) + }; + + // Blues + store.insert(1.into(), factory(2)).unwrap(); + store.insert(2.into(), factory(7)).unwrap(); + store.insert(3.into(), factory(11)).unwrap(); + + // Reds + store.insert(4.into(), factory(4)).unwrap(); + store.insert(5.into(), factory(9)).unwrap(); + store.insert(6.into(), factory(11)).unwrap(); // Tie-breaking case + + let mut data = GhostdagData::new_with_selected_parent(1.into(), 5); + data.add_blue(2.into(), Default::default(), &Default::default()); + data.add_blue(3.into(), Default::default(), &Default::default()); + + data.add_red(4.into()); + data.add_red(5.into()); + data.add_red(6.into()); + + let wrapper: GhostDagDataWrapper = data.clone().into(); + + let mut expected: Vec = vec![4.into(), 2.into(), 5.into(), 3.into(), 6.into()]; + assert_eq!( + expected, + wrapper + .ascending_mergeset_without_selected_parent(&store) + .filter_map(|b| b.map(|b| b.hash).ok()) + .collect::>() + ); + + itertools::assert_equal( + once(1.into()).chain(expected.iter().cloned()), + wrapper + .consensus_ordered_mergeset(&store) + .filter_map(|b| b.ok()), + ); + + expected.reverse(); + assert_eq!( + expected, + wrapper + .descending_mergeset_without_selected_parent(&store) + .filter_map(|b| b.map(|b| b.hash).ok()) + .collect::>() + ); + + // Use sets since the below functions have no order guarantee + let expected = BlockHashSet::from_iter([4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); + assert_eq!( + expected, + data.unordered_mergeset_without_selected_parent() + .collect::() + ); + + let expected = + BlockHashSet::from_iter([1.into(), 4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); + assert_eq!( + expected, + data.unordered_mergeset().collect::() + ); + } +} diff --git a/storage/dag-database/src/consensus_header.rs b/storage/dag-database/src/consensus_header.rs new file mode 100644 index 0000000000..75f09fb6c1 --- /dev/null +++ b/storage/dag-database/src/consensus_header.rs @@ -0,0 +1,168 @@ +use crate::{ + db::DBStorage, + errors::{StoreError, StoreResult}, + prelude::CachedDbAccess, + writer::{BatchDbWriter, DirectDbWriter}, +}; +use rocksdb::WriteBatch; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::U256; +use starcoin_types::{ + blockhash::BlockLevel, + header::{CompactHeaderData, ConsensusHeader, Header, HeaderWithBlockLevel}, +}; +use std::sync::Arc; + +pub trait HeaderStoreReader { + fn get_daa_score(&self, hash: Hash) -> Result; + fn get_blue_score(&self, hash: Hash) -> Result; + fn get_timestamp(&self, hash: 
Hash) -> Result; + fn get_difficulty(&self, hash: Hash) -> Result; + fn get_header(&self, hash: Hash) -> Result, StoreError>; + fn get_header_with_block_level(&self, hash: Hash) -> Result; + fn get_compact_header_data(&self, hash: Hash) -> Result; +} + +pub trait HeaderStore: HeaderStoreReader { + // This is append only + fn insert( + &self, + hash: Hash, + header: Arc
, + block_level: BlockLevel, + ) -> Result<(), StoreError>; +} + +pub(crate) const HEADERS_STORE_CF: &str = "headers-store"; +pub(crate) const COMPACT_HEADER_DATA_STORE_CF: &str = "compact-header-data"; + +/// A DB + cache implementation of `HeaderStore` trait, with concurrency support. +#[derive(Clone)] +pub struct DbHeadersStore { + db: Arc, + compact_headers_access: CachedDbAccess, + headers_access: CachedDbAccess, +} + +impl DbHeadersStore { + pub fn new(db: Arc, cache_size: u64) -> Self { + Self { + db: Arc::clone(&db), + compact_headers_access: CachedDbAccess::new( + Arc::clone(&db), + cache_size, + COMPACT_HEADER_DATA_STORE_CF, + ), + headers_access: CachedDbAccess::new(db, cache_size, HEADERS_STORE_CF), + } + } + + pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { + Self::new(Arc::clone(&self.db), cache_size) + } + + pub fn has(&self, hash: Hash) -> StoreResult { + self.headers_access.has(hash) + } + + pub fn get_header(&self, hash: Hash) -> Result { + let result = self.headers_access.read(hash)?; + Ok((*result.header).clone()) + } + + pub fn insert_batch( + &self, + batch: &mut WriteBatch, + hash: Hash, + header: Arc
, + block_level: BlockLevel, + ) -> Result<(), StoreError> { + if self.headers_access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.headers_access.write( + BatchDbWriter::new(batch), + hash, + HeaderWithBlockLevel { + header: header.clone(), + block_level, + }, + )?; + self.compact_headers_access.write( + BatchDbWriter::new(batch), + hash, + CompactHeaderData { + timestamp: header.timestamp(), + difficulty: header.difficulty(), + }, + )?; + Ok(()) + } +} + +impl HeaderStoreReader for DbHeadersStore { + fn get_daa_score(&self, _hash: Hash) -> Result { + unimplemented!() + } + + fn get_blue_score(&self, _hash: Hash) -> Result { + unimplemented!() + } + + fn get_timestamp(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash)? { + return Ok(header_with_block_level.header.timestamp()); + } + Ok(self.compact_headers_access.read(hash)?.timestamp) + } + + fn get_difficulty(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash)? { + return Ok(header_with_block_level.header.difficulty()); + } + Ok(self.compact_headers_access.read(hash)?.difficulty) + } + + fn get_header(&self, hash: Hash) -> Result, StoreError> { + Ok(self.headers_access.read(hash)?.header) + } + + fn get_header_with_block_level(&self, hash: Hash) -> Result { + self.headers_access.read(hash) + } + + fn get_compact_header_data(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash)? { + return Ok(CompactHeaderData { + timestamp: header_with_block_level.header.timestamp(), + difficulty: header_with_block_level.header.difficulty(), + }); + } + self.compact_headers_access.read(hash) + } +} + +impl HeaderStore for DbHeadersStore { + fn insert(&self, hash: Hash, header: Arc
, block_level: u8) -> Result<(), StoreError> { + if self.headers_access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.compact_headers_access.write( + DirectDbWriter::new(&self.db), + hash, + CompactHeaderData { + timestamp: header.timestamp(), + difficulty: header.difficulty(), + }, + )?; + self.headers_access.write( + DirectDbWriter::new(&self.db), + hash, + HeaderWithBlockLevel { + header, + block_level, + }, + )?; + Ok(()) + } +} diff --git a/storage/dag-database/src/consensus_reachability.rs b/storage/dag-database/src/consensus_reachability.rs new file mode 100644 index 0000000000..dfc3fd9b7a --- /dev/null +++ b/storage/dag-database/src/consensus_reachability.rs @@ -0,0 +1,497 @@ +use crate::{ + db::DBStorage, + prelude::{BatchDbWriter, CachedDbAccess, CachedDbItem, DirectDbWriter, StoreError}, +}; +use starcoin_crypto::HashValue as Hash; +use starcoin_storage::storage::RawDBStorage; + +use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard}; +use rocksdb::WriteBatch; +use starcoin_types::{ + blockhash::{self, BlockHashMap, BlockHashes}, + interval::Interval, + reachability::ReachabilityData, +}; +use std::{collections::hash_map::Entry::Vacant, sync::Arc}; + +/// Reader API for `ReachabilityStore`. +pub trait ReachabilityStoreReader { + fn has(&self, hash: Hash) -> Result; + fn get_interval(&self, hash: Hash) -> Result; + fn get_parent(&self, hash: Hash) -> Result; + fn get_children(&self, hash: Hash) -> Result; + fn get_future_covering_set(&self, hash: Hash) -> Result; +} + +/// Write API for `ReachabilityStore`. All write functions are deliberately `mut` +/// since reachability writes are not append-only and thus need to be guarded. +pub trait ReachabilityStore: ReachabilityStoreReader { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError>; + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError>; + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError>; + fn append_child(&mut self, hash: Hash, child: Hash) -> Result; + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError>; + fn get_height(&self, hash: Hash) -> Result; + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError>; + fn get_reindex_root(&self) -> Result; +} + +const REINDEX_ROOT_KEY: &str = "reachability-reindex-root"; +pub(crate) const REACHABILITY_DATA_CF: &str = "reachability-data"; +// TODO: explore perf to see if using fixed-length constants for store prefixes is preferable + +/// A DB + cache implementation of `ReachabilityStore` trait, with concurrent readers support. 
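// Editorial sketch of the intended call pattern (assumed; it mirrors the in-memory
// store test at the bottom of this file, and the cache size and hashes are
// illustrative):
//
//     let mut store = DbReachabilityStore::new(db.clone(), 1024);
//     store.init(origin, Interval::maximal())?;    // origin takes the whole interval space
//     store.insert(block, origin, interval, 1)?;   // height-1 child of origin
//     let children = store.get_children(origin)?;  // Arc<Vec<Hash>> of direct children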
+#[derive(Clone)] +pub struct DbReachabilityStore { + db: Arc, + access: CachedDbAccess>, + reindex_root: CachedDbItem, +} + +impl DbReachabilityStore { + pub fn new(db: Arc, cache_size: u64) -> Self { + Self::new_with_prefix_end(db, cache_size) + } + + pub fn new_with_alternative_prefix_end(db: Arc, cache_size: u64) -> Self { + Self::new_with_prefix_end(db, cache_size) + } + + fn new_with_prefix_end(db: Arc, cache_size: u64) -> Self { + Self { + db: Arc::clone(&db), + access: CachedDbAccess::new(Arc::clone(&db), cache_size, REACHABILITY_DATA_CF), + reindex_root: CachedDbItem::new( + db, + REACHABILITY_DATA_CF, + REINDEX_ROOT_KEY.as_bytes().to_vec(), + ), + } + } + + pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { + Self::new_with_prefix_end(Arc::clone(&self.db), cache_size) + } +} + +impl ReachabilityStore for DbReachabilityStore { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + debug_assert!(!self.access.has(origin)?); + + let data = Arc::new(ReachabilityData::new( + Hash::new(blockhash::NONE), + capacity, + 0, + )); + let mut batch = WriteBatch::default(); + self.access + .write(BatchDbWriter::new(&mut batch), origin, data)?; + self.reindex_root + .write(BatchDbWriter::new(&mut batch), &origin)?; + self.db + .raw_write_batch(batch) + .map_err(|e| StoreError::DBIoError(e.to_string()))?; + + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if self.access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + let data = Arc::new(ReachabilityData::new(parent, interval, height)); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + let mut data = self.access.read(hash)?; + Arc::make_mut(&mut data).interval = interval; + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + let mut data = self.access.read(hash)?; + let height = data.height; + let mut_data = Arc::make_mut(&mut data); + Arc::make_mut(&mut mut_data.children).push(child); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + let mut data = self.access.read(hash)?; + let mut_data = Arc::make_mut(&mut data); + Arc::make_mut(&mut mut_data.future_covering_set).insert(insertion_index, fci); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.height) + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.reindex_root + .write(DirectDbWriter::new(&self.db), &root) + } + + fn get_reindex_root(&self) -> Result { + self.reindex_root.read() + } +} + +impl ReachabilityStoreReader for DbReachabilityStore { + fn has(&self, hash: Hash) -> Result { + self.access.has(hash) + } + + fn get_interval(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.interval) + } + + fn get_parent(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.parent) + } + + fn get_children(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.children)) + } + + fn get_future_covering_set(&self, hash: Hash) -> Result { + 
Ok(Arc::clone(&self.access.read(hash)?.future_covering_set)) + } +} + +pub struct StagingReachabilityStore<'a> { + store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>, + staging_writes: BlockHashMap, + staging_reindex_root: Option, +} + +impl<'a> StagingReachabilityStore<'a> { + pub fn new(store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>) -> Self { + Self { + store_read, + staging_writes: BlockHashMap::new(), + staging_reindex_root: None, + } + } + + pub fn commit( + self, + batch: &mut WriteBatch, + ) -> Result, StoreError> { + let mut store_write = RwLockUpgradableReadGuard::upgrade(self.store_read); + for (k, v) in self.staging_writes { + let data = Arc::new(v); + store_write + .access + .write(BatchDbWriter::new(batch), k, data)? + } + if let Some(root) = self.staging_reindex_root { + store_write + .reindex_root + .write(BatchDbWriter::new(batch), &root)?; + } + Ok(store_write) + } +} + +impl ReachabilityStore for StagingReachabilityStore<'_> { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; + self.set_reindex_root(origin)?; + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if self.store_read.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + if let Vacant(e) = self.staging_writes.entry(hash) { + e.insert(ReachabilityData::new(parent, interval, height)); + Ok(()) + } else { + Err(StoreError::KeyAlreadyExists(hash.to_string())) + } + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + if let Some(data) = self.staging_writes.get_mut(&hash) { + data.interval = interval; + return Ok(()); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + data.interval = interval; + self.staging_writes.insert(hash, data); + + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + if let Some(data) = self.staging_writes.get_mut(&hash) { + Arc::make_mut(&mut data.children).push(child); + return Ok(data.height); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + let height = data.height; + Arc::make_mut(&mut data.children).push(child); + self.staging_writes.insert(hash, data); + + Ok(height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + if let Some(data) = self.staging_writes.get_mut(&hash) { + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + return Ok(()); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + self.staging_writes.insert(hash, data); + + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.height) + } else { + Ok(self.store_read.access.read(hash)?.height) + } + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.staging_reindex_root = Some(root); + Ok(()) + } + + fn get_reindex_root(&self) -> Result { + if let Some(root) = self.staging_reindex_root { + Ok(root) + } else { + Ok(self.store_read.get_reindex_root()?) + } + } +} + +impl ReachabilityStoreReader for StagingReachabilityStore<'_> { + fn has(&self, hash: Hash) -> Result { + Ok(self.staging_writes.contains_key(&hash) || self.store_read.access.has(hash)?) 
+ } + + fn get_interval(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.interval) + } else { + Ok(self.store_read.access.read(hash)?.interval) + } + } + + fn get_parent(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.parent) + } else { + Ok(self.store_read.access.read(hash)?.parent) + } + } + + fn get_children(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(BlockHashes::clone(&data.children)) + } else { + Ok(BlockHashes::clone( + &self.store_read.access.read(hash)?.children, + )) + } + } + + fn get_future_covering_set(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(BlockHashes::clone(&data.future_covering_set)) + } else { + Ok(BlockHashes::clone( + &self.store_read.access.read(hash)?.future_covering_set, + )) + } + } +} + +pub struct MemoryReachabilityStore { + map: BlockHashMap, + reindex_root: Option, +} + +impl Default for MemoryReachabilityStore { + fn default() -> Self { + Self::new() + } +} + +impl MemoryReachabilityStore { + pub fn new() -> Self { + Self { + map: BlockHashMap::new(), + reindex_root: None, + } + } + + fn get_data_mut(&mut self, hash: Hash) -> Result<&mut ReachabilityData, StoreError> { + match self.map.get_mut(&hash) { + Some(data) => Ok(data), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_data(&self, hash: Hash) -> Result<&ReachabilityData, StoreError> { + match self.map.get(&hash) { + Some(data) => Ok(data), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } +} + +impl ReachabilityStore for MemoryReachabilityStore { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; + self.set_reindex_root(origin)?; + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if let Vacant(e) = self.map.entry(hash) { + e.insert(ReachabilityData::new(parent, interval, height)); + Ok(()) + } else { + Err(StoreError::KeyAlreadyExists(hash.to_string())) + } + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + let data = self.get_data_mut(hash)?; + data.interval = interval; + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + let data = self.get_data_mut(hash)?; + Arc::make_mut(&mut data.children).push(child); + Ok(data.height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + let data = self.get_data_mut(hash)?; + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.height) + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.reindex_root = Some(root); + Ok(()) + } + + fn get_reindex_root(&self) -> Result { + match self.reindex_root { + Some(root) => Ok(root), + None => Err(StoreError::KeyNotFound(REINDEX_ROOT_KEY.to_string())), + } + } +} + +impl ReachabilityStoreReader for MemoryReachabilityStore { + fn has(&self, hash: Hash) -> Result { + Ok(self.map.contains_key(&hash)) + } + + fn get_interval(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.interval) + } + + fn get_parent(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.parent) + } + + fn get_children(&self, hash: 
Hash) -> Result { + Ok(Arc::clone(&self.get_data(hash)?.children)) + } + + fn get_future_covering_set(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.get_data(hash)?.future_covering_set)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_store_basics() { + let mut store: Box = Box::new(MemoryReachabilityStore::new()); + let (hash, parent) = (7.into(), 15.into()); + let interval = Interval::maximal(); + store.insert(hash, parent, interval, 5).unwrap(); + let height = store.append_child(hash, 31.into()).unwrap(); + assert_eq!(height, 5); + let children = store.get_children(hash).unwrap(); + println!("{children:?}"); + store.get_interval(7.into()).unwrap(); + println!("{children:?}"); + } +} diff --git a/storage/dag-database/src/consensus_relations.rs b/storage/dag-database/src/consensus_relations.rs new file mode 100644 index 0000000000..f2f5b20996 --- /dev/null +++ b/storage/dag-database/src/consensus_relations.rs @@ -0,0 +1,273 @@ +use crate::{ + db::DBStorage, + prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter, StoreError}, +}; +use rocksdb::WriteBatch; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlockLevel}; +use std::{collections::hash_map::Entry::Vacant, sync::Arc}; + +/// Reader API for `RelationsStore`. +pub trait RelationsStoreReader { + fn get_parents(&self, hash: Hash) -> Result; + fn get_children(&self, hash: Hash) -> Result; + fn has(&self, hash: Hash) -> Result; +} + +/// Write API for `RelationsStore`. The insert function is deliberately `mut` +/// since it modifies the children arrays for previously added parents which is +/// non-append-only and thus needs to be guarded. +pub trait RelationsStore: RelationsStoreReader { + /// Inserts `parents` into a new store entry for `hash`, and for each `parent ∈ parents` adds `hash` to `parent.children` + fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError>; +} + +pub(crate) const PARENTS_CF: &str = "block-parents"; +pub(crate) const CHILDREN_CF: &str = "block-children"; + +/// A DB + cache implementation of `RelationsStore` trait, with concurrent readers support. +#[derive(Clone)] +pub struct DbRelationsStore { + db: Arc, + level: BlockLevel, + parents_access: CachedDbAccess>>, + children_access: CachedDbAccess>>, +} + +impl DbRelationsStore { + pub fn new(db: Arc, level: BlockLevel, cache_size: u64) -> Self { + Self { + db: Arc::clone(&db), + level, + parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size, PARENTS_CF), + children_access: CachedDbAccess::new(db, cache_size, CHILDREN_CF), + } + } + + pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { + Self::new(Arc::clone(&self.db), self.level, cache_size) + } + + pub fn insert_batch( + &mut self, + batch: &mut WriteBatch, + hash: Hash, + parents: BlockHashes, + ) -> Result<(), StoreError> { + if self.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + + // Insert a new entry for `hash` + self.parents_access + .write(BatchDbWriter::new(batch), hash, parents.clone())?; + + // The new hash has no children yet + self.children_access.write( + BatchDbWriter::new(batch), + hash, + BlockHashes::new(Vec::new()), + )?; + + // Update `children` for each parent + for parent in parents.iter().cloned() { + let mut children = (*self.get_children(parent)?).clone(); + children.push(hash); + self.children_access.write( + BatchDbWriter::new(batch), + parent, + BlockHashes::new(children), + )?; + } + + Ok(()) + } +} + +impl RelationsStoreReader for DbRelationsStore { + fn get_parents(&self, hash: Hash) -> Result { + self.parents_access.read(hash) + } + + fn get_children(&self, hash: Hash) -> Result { + self.children_access.read(hash) + } + + fn has(&self, hash: Hash) -> Result { + if self.parents_access.has(hash)? { + debug_assert!(self.children_access.has(hash)?); + Ok(true) + } else { + Ok(false) + } + } +} + +impl RelationsStore for DbRelationsStore { + /// See `insert_batch` as well + /// TODO: use one function with DbWriter for both this function and insert_batch + fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { + if self.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + + // Insert a new entry for `hash` + self.parents_access + .write(DirectDbWriter::new(&self.db), hash, parents.clone())?; + + // The new hash has no children yet + self.children_access.write( + DirectDbWriter::new(&self.db), + hash, + BlockHashes::new(Vec::new()), + )?; + + // Update `children` for each parent + for parent in parents.iter().cloned() { + let mut children = (*self.get_children(parent)?).clone(); + children.push(hash); + self.children_access.write( + DirectDbWriter::new(&self.db), + parent, + BlockHashes::new(children), + )?; + } + + Ok(()) + } +} + +pub struct MemoryRelationsStore { + parents_map: BlockHashMap, + children_map: BlockHashMap, +} + +impl MemoryRelationsStore { + pub fn new() -> Self { + Self { + parents_map: BlockHashMap::new(), + children_map: BlockHashMap::new(), + } + } +} + +impl Default for MemoryRelationsStore { + fn default() -> Self { + Self::new() + } +} + +impl RelationsStoreReader for MemoryRelationsStore { + fn get_parents(&self, hash: Hash) -> Result { + match self.parents_map.get(&hash) { + Some(parents) => Ok(BlockHashes::clone(parents)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_children(&self, hash: Hash) -> Result { + match self.children_map.get(&hash) { + Some(children) => Ok(BlockHashes::clone(children)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn has(&self, hash: Hash) -> Result { + Ok(self.parents_map.contains_key(&hash)) + } +} + +impl RelationsStore for MemoryRelationsStore { + fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { + if let Vacant(e) = self.parents_map.entry(hash) { + // Update the new entry for `hash` + e.insert(BlockHashes::clone(&parents)); + + // Update `children` for each parent + for parent in parents.iter().cloned() { + let mut children = (*self.get_children(parent)?).clone(); + children.push(hash); + self.children_map.insert(parent, BlockHashes::new(children)); + } + + // The new hash has no children yet + self.children_map.insert(hash, BlockHashes::new(Vec::new())); + Ok(()) + } else { + Err(StoreError::KeyAlreadyExists(hash.to_string())) + } + } +} + +#[cfg(test)] +mod tests { + 
use super::*; + use crate::{ + db::{FlexiDagStorageConfig, RelationsStoreConfig}, + prelude::FlexiDagStorage, + }; + + #[test] + fn test_memory_relations_store() { + test_relations_store(MemoryRelationsStore::new()); + } + + #[test] + fn test_db_relations_store() { + let db_tempdir = tempfile::tempdir().unwrap(); + let rs_conf = RelationsStoreConfig { + block_level: 0, + cache_size: 2, + }; + let config = FlexiDagStorageConfig::new() + .update_parallelism(1) + .update_relations_conf(rs_conf); + + let db = FlexiDagStorage::create_from_path(db_tempdir.path(), config) + .expect("failed to create flexidag storage"); + test_relations_store(db.relations_store); + } + + fn test_relations_store(mut store: T) { + let parents = [ + (1, vec![]), + (2, vec![1]), + (3, vec![1]), + (4, vec![2, 3]), + (5, vec![1, 4]), + ]; + for (i, vec) in parents.iter().cloned() { + store + .insert( + i.into(), + BlockHashes::new(vec.iter().copied().map(Hash::from).collect()), + ) + .unwrap(); + } + + let expected_children = [ + (1, vec![2, 3, 5]), + (2, vec![4]), + (3, vec![4]), + (4, vec![5]), + (5, vec![]), + ]; + for (i, vec) in expected_children { + assert!(store + .get_children(i.into()) + .unwrap() + .iter() + .copied() + .eq(vec.iter().copied().map(Hash::from))); + } + + for (i, vec) in parents { + assert!(store + .get_parents(i.into()) + .unwrap() + .iter() + .copied() + .eq(vec.iter().copied().map(Hash::from))); + } + } +} diff --git a/storage/dag-database/src/db.rs b/storage/dag-database/src/db.rs new file mode 100644 index 0000000000..55a89d2e46 --- /dev/null +++ b/storage/dag-database/src/db.rs @@ -0,0 +1,147 @@ +use crate::consensus::{ + DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, + COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, HEADERS_STORE_CF, + PARENTS_CF, REACHABILITY_DATA_CF, +}; +use crate::errors::StoreError; +use starcoin_config::RocksdbConfig; +pub(crate) use starcoin_storage::db_storage::DBStorage; +use std::{path::Path, sync::Arc}; + +#[derive(Clone)] +pub struct FlexiDagStorage { + pub ghost_dag_store: DbGhostdagStore, + pub header_store: DbHeadersStore, + pub reachability_store: DbReachabilityStore, + pub relations_store: DbRelationsStore, +} + +#[derive(Clone, Default)] +pub struct GhostDagStoreConfig { + pub block_level: u8, + pub cache_size: u64, +} + +#[derive(Clone, Default)] +pub struct HeaderStoreConfig { + pub cache_size: u64, +} + +#[derive(Clone, Default)] +pub struct ReachabilityStoreConfig { + pub cache_size: u64, +} + +#[derive(Clone, Default)] +pub struct RelationsStoreConfig { + pub block_level: u8, + pub cache_size: u64, +} + +#[derive(Clone, Default)] +pub struct FlexiDagStorageConfig { + pub parallelism: u64, + pub gds_conf: GhostDagStoreConfig, + pub hs_conf: HeaderStoreConfig, + pub rbs_conf: ReachabilityStoreConfig, + pub rs_conf: RelationsStoreConfig, +} + +impl FlexiDagStorageConfig { + pub fn new() -> Self { + FlexiDagStorageConfig::default() + } + + pub fn create_with_params(parallelism: u64, block_level: u8, cache_size: u64) -> Self { + Self { + parallelism, + gds_conf: GhostDagStoreConfig { + block_level, + cache_size, + }, + hs_conf: HeaderStoreConfig { cache_size }, + rbs_conf: ReachabilityStoreConfig { cache_size }, + rs_conf: RelationsStoreConfig { + block_level, + cache_size, + }, + } + } + + pub fn update_parallelism(mut self, parallelism: u64) -> Self { + self.parallelism = parallelism; + self + } + + pub fn update_ghost_dag_conf(mut self, gds_conf: GhostDagStoreConfig) -> Self { + 
self.gds_conf = gds_conf; + self + } + + pub fn update_headers_conf(mut self, hs_conf: HeaderStoreConfig) -> Self { + self.hs_conf = hs_conf; + self + } + + pub fn update_reachability_conf(mut self, rbs_conf: ReachabilityStoreConfig) -> Self { + self.rbs_conf = rbs_conf; + self + } + + pub fn update_relations_conf(mut self, rs_conf: RelationsStoreConfig) -> Self { + self.rs_conf = rs_conf; + self + } +} + +impl FlexiDagStorage { + /// Creates or loads an existing storage from the provided directory path. + pub fn create_from_path>( + db_path: P, + config: FlexiDagStorageConfig, + ) -> Result { + let rocksdb_config = RocksdbConfig { + parallelism: config.parallelism, + ..Default::default() + }; + + let db = Arc::new( + DBStorage::open_with_cfs( + db_path, + vec![ + // consensus headers + HEADERS_STORE_CF, + COMPACT_HEADER_DATA_STORE_CF, + // consensus relations + PARENTS_CF, + CHILDREN_CF, + // consensus reachability + REACHABILITY_DATA_CF, + // consensus ghostdag + GHOST_DAG_STORE_CF, + COMPACT_GHOST_DAG_STORE_CF, + ], + false, + rocksdb_config, + None, + ) + .map_err(|e| StoreError::DBIoError(e.to_string()))?, + ); + + Ok(Self { + ghost_dag_store: DbGhostdagStore::new( + db.clone(), + config.gds_conf.block_level, + config.gds_conf.cache_size, + ), + + header_store: DbHeadersStore::new(db.clone(), config.hs_conf.cache_size), + reachability_store: DbReachabilityStore::new(db.clone(), config.rbs_conf.cache_size), + relations_store: DbRelationsStore::new( + db, + config.rs_conf.block_level, + config.rs_conf.cache_size, + ), + }) + } +} diff --git a/storage/dag-database/src/errors.rs b/storage/dag-database/src/errors.rs new file mode 100644 index 0000000000..1ca1683317 --- /dev/null +++ b/storage/dag-database/src/errors.rs @@ -0,0 +1,55 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum StoreError { + #[error("key {0} not found in store")] + KeyNotFound(String), + + #[error("key {0} already exists in store")] + KeyAlreadyExists(String), + + #[error("column family {0} not exist in db")] + CFNotExist(String), + + #[error("IO error {0}")] + DBIoError(String), + + #[error("rocksdb error {0}")] + DbError(#[from] rocksdb::Error), + + #[error("bincode error {0}")] + DeserializationError(#[from] Box), + + #[error("ghostdag {0} duplicate blocks")] + DAGDupBlocksError(String), +} + +pub type StoreResult = std::result::Result; + +pub trait StoreResultExtensions { + fn unwrap_option(self) -> Option; +} + +impl StoreResultExtensions for StoreResult { + fn unwrap_option(self) -> Option { + match self { + Ok(value) => Some(value), + Err(StoreError::KeyNotFound(_)) => None, + Err(err) => panic!("Unexpected store error: {err:?}"), + } + } +} + +pub trait StoreResultEmptyTuple { + fn unwrap_and_ignore_key_already_exists(self); +} + +impl StoreResultEmptyTuple for StoreResult<()> { + fn unwrap_and_ignore_key_already_exists(self) { + match self { + Ok(_) => (), + Err(StoreError::KeyAlreadyExists(_)) => (), + Err(err) => panic!("Unexpected store error: {err:?}"), + } + } +} diff --git a/storage/dag-database/src/item.rs b/storage/dag-database/src/item.rs new file mode 100644 index 0000000000..570035810e --- /dev/null +++ b/storage/dag-database/src/item.rs @@ -0,0 +1,96 @@ +use crate::{db::DBStorage, errors::StoreError}; + +use super::prelude::DbWriter; +use parking_lot::RwLock; +use serde::{de::DeserializeOwned, Serialize}; +use starcoin_storage::storage::RawDBStorage; +use std::sync::Arc; + +/// A cached DB item with concurrency support +#[derive(Clone)] +pub struct CachedDbItem { + db: Arc, + key: 
Vec, + prefix: &'static str, + cached_item: Arc>>, +} + +impl CachedDbItem { + pub fn new(db: Arc, prefix: &'static str, key: Vec) -> Self { + Self { + db, + key, + prefix, + cached_item: Arc::new(RwLock::new(None)), + } + } + + pub fn read(&self) -> Result + where + T: Clone + DeserializeOwned, + { + if let Some(item) = self.cached_item.read().clone() { + return Ok(item); + } + if let Some(slice) = self + .db + .raw_get_pinned_cf(self.prefix, &self.key) + .map_err(|_| StoreError::CFNotExist(self.prefix.to_string()))? + { + let item: T = bincode::deserialize(&slice)?; + *self.cached_item.write() = Some(item.clone()); + Ok(item) + } else { + Err(StoreError::KeyNotFound( + String::from_utf8(self.key.clone()) + .unwrap_or(("unrecoverable key string").to_string()), + )) + } + } + + pub fn write(&mut self, mut writer: impl DbWriter, item: &T) -> Result<(), StoreError> + where + T: Clone + Serialize, + { + *self.cached_item.write() = Some(item.clone()); + let bin_data = bincode::serialize(item)?; + writer.put(self.prefix, &self.key, bin_data)?; + Ok(()) + } + + pub fn remove(&mut self, mut writer: impl DbWriter) -> Result<(), StoreError> +where { + *self.cached_item.write() = None; + writer.delete(self.prefix, &self.key)?; + Ok(()) + } + + pub fn update(&mut self, mut writer: impl DbWriter, op: F) -> Result + where + T: Clone + Serialize + DeserializeOwned, + F: Fn(T) -> T, + { + let mut guard = self.cached_item.write(); + let mut item = if let Some(item) = guard.take() { + item + } else if let Some(slice) = self + .db + .raw_get_pinned_cf(self.prefix, &self.key) + .map_err(|_| StoreError::CFNotExist(self.prefix.to_string()))? + { + let item: T = bincode::deserialize(&slice)?; + item + } else { + return Err(StoreError::KeyNotFound( + String::from_utf8(self.key.clone()) + .unwrap_or(("unrecoverable key string").to_string()), + )); + }; + + item = op(item); // Apply the update op + *guard = Some(item.clone()); + let bin_data = bincode::serialize(&item)?; + writer.put(self.prefix, &self.key, bin_data)?; + Ok(item) + } +} diff --git a/storage/dag-database/src/lib.rs b/storage/dag-database/src/lib.rs new file mode 100644 index 0000000000..1137b8b86c --- /dev/null +++ b/storage/dag-database/src/lib.rs @@ -0,0 +1,30 @@ +mod access; +mod cache; +mod consensus_ghostdag; +mod consensus_header; +mod consensus_reachability; +mod consensus_relations; +mod db; +mod errors; +mod item; +mod writer; + +pub mod prelude { + use crate::{db, errors}; + + pub use super::{ + access::CachedDbAccess, + cache::Cache, + item::CachedDbItem, + writer::{BatchDbWriter, DbWriter, DirectDbWriter}, + }; + pub use db::{FlexiDagStorage, FlexiDagStorageConfig}; + pub use errors::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; +} + +pub mod consensus { + pub use super::{ + consensus_ghostdag::*, consensus_header::*, consensus_reachability::*, + consensus_relations::*, + }; +} diff --git a/storage/dag-database/src/writer.rs b/storage/dag-database/src/writer.rs new file mode 100644 index 0000000000..ee66447642 --- /dev/null +++ b/storage/dag-database/src/writer.rs @@ -0,0 +1,68 @@ +use rocksdb::WriteBatch; +use starcoin_storage::storage::InnerStore; + +use crate::{db::DBStorage, errors::StoreError}; + +/// Abstraction over direct/batched DB writing +pub trait DbWriter { + fn put(&mut self, cf_name: &str, key: &[u8], value: Vec) -> Result<(), StoreError>; + fn delete(&mut self, cf_name: &str, key: &[u8]) -> Result<(), StoreError>; +} + +pub struct DirectDbWriter<'a> { + db: &'a DBStorage, +} + +impl<'a> 
DirectDbWriter<'a> { + pub fn new(db: &'a DBStorage) -> Self { + Self { db } + } +} + +impl DbWriter for DirectDbWriter<'_> { + fn put(&mut self, cf_name: &str, key: &[u8], value: Vec) -> Result<(), StoreError> { + self.db + .put(cf_name, key.to_owned(), value) + .map_err(|e| StoreError::DBIoError(e.to_string())) + } + + fn delete(&mut self, cf_name: &str, key: &[u8]) -> Result<(), StoreError> { + self.db + .remove(cf_name, key.to_owned()) + .map_err(|e| StoreError::DBIoError(e.to_string())) + } +} + +pub struct BatchDbWriter<'a> { + batch: &'a mut WriteBatch, +} + +impl<'a> BatchDbWriter<'a> { + pub fn new(batch: &'a mut WriteBatch) -> Self { + Self { batch } + } +} + +impl DbWriter for BatchDbWriter<'_> { + fn put(&mut self, _cf_name: &str, key: &[u8], value: Vec) -> Result<(), StoreError> { + self.batch.put(key, value); + Ok(()) + } + + fn delete(&mut self, _cf_name: &str, key: &[u8]) -> Result<(), StoreError> { + self.batch.delete(key); + Ok(()) + } +} + +impl DbWriter for &mut T { + #[inline] + fn put(&mut self, cf_name: &str, key: &[u8], value: Vec) -> Result<(), StoreError> { + (*self).put(cf_name, key, value) + } + + #[inline] + fn delete(&mut self, cf_name: &str, key: &[u8]) -> Result<(), StoreError> { + (*self).delete(cf_name, key) + } +} diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 8fd446d9c4..60e6c93b6e 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -388,7 +388,8 @@ impl DagBlockStore for Storage { } fn save_flexi_dag_startup_info(&self, startup_info: StartupInfo) -> Result<()> { - self.chain_info_storage.save_flexi_dag_startup_info(startup_info) + self.chain_info_storage + .save_flexi_dag_startup_info(startup_info) } } @@ -637,6 +638,7 @@ impl SyncFlexiDagStore for Storage { pub trait Store: StateNodeStore + DagBlockStore + + SyncFlexiDagStore + BlockStore + BlockInfoStore + TransactionStore diff --git a/types/Cargo.toml b/types/Cargo.toml index 67656e3387..4e1301a8fb 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -18,7 +18,6 @@ starcoin-crypto = { workspace = true } starcoin-uint = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } -consensus-types = { workspace = true } [features] default = [] diff --git a/types/src/blockhash.rs b/types/src/blockhash.rs new file mode 100644 index 0000000000..f283d0f387 --- /dev/null +++ b/types/src/blockhash.rs @@ -0,0 +1,71 @@ +use starcoin_crypto::hash::HashValue; +use std::collections::{HashMap, HashSet}; + +pub const BLOCK_VERSION: u16 = 1; + +pub const HASH_LENGTH: usize = HashValue::LENGTH; + +use std::sync::Arc; + +pub type BlockHashes = Arc>; + +/// `blockhash::NONE` is a hash which is used in rare cases as the `None` block hash +pub const NONE: [u8; HASH_LENGTH] = [0u8; HASH_LENGTH]; + +/// `blockhash::VIRTUAL` is a special hash representing the `virtual` block. +pub const VIRTUAL: [u8; HASH_LENGTH] = [0xff; HASH_LENGTH]; + +/// `blockhash::ORIGIN` is a special hash representing a `virtual genesis` block. +/// It serves as a special local block which all locally-known +/// blocks are in its future. 
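+/// Genesis headers reference this value as their sole parent;
+/// `DagBlockHeader::is_genesis` checks against it and `Header::genesis_hash` returns it.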
+pub const ORIGIN: [u8; HASH_LENGTH] = [0xfe; HASH_LENGTH]; + +pub trait BlockHashExtensions { + fn is_none(&self) -> bool; + fn is_virtual(&self) -> bool; + fn is_origin(&self) -> bool; +} + +impl BlockHashExtensions for HashValue { + fn is_none(&self) -> bool { + self.eq(&HashValue::new(NONE)) + } + + fn is_virtual(&self) -> bool { + self.eq(&HashValue::new(VIRTUAL)) + } + + fn is_origin(&self) -> bool { + self.eq(&HashValue::new(ORIGIN)) + } +} + +/// Generates a unique block hash for each call to this function. +/// To be used for test purposes only. +pub fn new_unique() -> HashValue { + use std::sync::atomic::{AtomicU64, Ordering}; + static COUNTER: AtomicU64 = AtomicU64::new(1); + let c = COUNTER.fetch_add(1, Ordering::Relaxed); + HashValue::from_u64(c) +} + +/// TODO:FIXME as u256 +pub type BlueWorkType = u128; + +/// The type used to represent the GHOSTDAG K parameter +pub type KType = u16; + +/// Map from Block hash to K type +pub type HashKTypeMap = std::sync::Arc>; + +pub type BlockHashMap = HashMap; + +/// Same as `BlockHashMap` but a `HashSet`. +pub type BlockHashSet = HashSet; + +pub struct ChainPath { + pub added: Vec, + pub removed: Vec, +} + +pub type BlockLevel = u8; diff --git a/types/src/dag_block.rs b/types/src/dag_block.rs index 40abaf2383..bc089a92e5 100644 --- a/types/src/dag_block.rs +++ b/types/src/dag_block.rs @@ -3,13 +3,12 @@ use crate::account_address::AccountAddress; use crate::block::BlockHeaderExtra; -use crate::block_metadata::BlockMetadata; +use crate::blockhash::ORIGIN; use crate::genesis_config::{ChainId, ConsensusStrategy}; use crate::language_storage::CORE_CODE_ADDRESS; use crate::transaction::SignedUserTransaction; use crate::U256; use bcs_ext::Sample; -use consensus_types::blockhash::ORIGIN; use schemars::{self, JsonSchema}; use serde::{Deserialize, Deserializer, Serialize}; pub use starcoin_accumulator::accumulator_info::AccumulatorInfo; @@ -142,7 +141,8 @@ impl DagBlockHeader { } pub fn id(&self) -> HashValue { - self.id.expect("DagBlockHeader id should be Some after init.") + self.id + .expect("DagBlockHeader id should be Some after init.") } pub fn parent_hash(&self) -> Vec { @@ -199,9 +199,9 @@ impl DagBlockHeader { pub fn is_genesis(&self) -> bool { if self.parent_hash.len() == 1 { - return self.parent_hash[0] == HashValue::new(ORIGIN); + return self.parent_hash[0] == HashValue::new(ORIGIN); } - return false; + false } pub fn genesis_block_header( @@ -483,7 +483,10 @@ pub struct BlockBody { } impl BlockBody { - pub fn new(transactions: Vec, uncles: Option>) -> Self { + pub fn new( + transactions: Vec, + uncles: Option>, + ) -> Self { Self { transactions, uncles, @@ -752,8 +755,7 @@ impl DagBlockTemplate { strategy: ConsensusStrategy, block_metadata: DagBlockMetadata, ) -> Self { - let (parent_hash, timestamp, author, _author_auth_key, _, _) = - block_metadata.into_inner(); + let (parent_hash, timestamp, author, _author_auth_key, _, _) = block_metadata.into_inner(); Self { parent_hash, block_accumulator_root: parent_block_accumulator_root, diff --git a/types/src/ghostdata.rs b/types/src/ghostdata.rs new file mode 100644 index 0000000000..02d1487dce --- /dev/null +++ b/types/src/ghostdata.rs @@ -0,0 +1,146 @@ +use crate::{ + blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType}, + trusted::ExternalGhostdagData, +}; +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; +use std::sync::Arc; + +#[derive(Clone, Serialize, Deserialize, Default, Debug)] +pub struct GhostdagData { + pub blue_score: u64, + 
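+    // Primary ordering key for blocks (see `SortableBlock` in `ordering.rs`);
+    // set together with `blue_score` via `finalize_score_and_work`.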
pub blue_work: BlueWorkType, + pub selected_parent: Hash, + pub mergeset_blues: BlockHashes, + pub mergeset_reds: BlockHashes, + pub blues_anticone_sizes: HashKTypeMap, +} + +#[derive(Clone, Serialize, Deserialize, Copy)] +pub struct CompactGhostdagData { + pub blue_score: u64, + pub blue_work: BlueWorkType, + pub selected_parent: Hash, +} + +impl From for GhostdagData { + fn from(value: ExternalGhostdagData) -> Self { + Self { + blue_score: value.blue_score, + blue_work: value.blue_work, + selected_parent: value.selected_parent, + mergeset_blues: Arc::new(value.mergeset_blues), + mergeset_reds: Arc::new(value.mergeset_reds), + blues_anticone_sizes: Arc::new(value.blues_anticone_sizes), + } + } +} + +impl From<&GhostdagData> for ExternalGhostdagData { + fn from(value: &GhostdagData) -> Self { + Self { + blue_score: value.blue_score, + blue_work: value.blue_work, + selected_parent: value.selected_parent, + mergeset_blues: (*value.mergeset_blues).clone(), + mergeset_reds: (*value.mergeset_reds).clone(), + blues_anticone_sizes: (*value.blues_anticone_sizes).clone(), + } + } +} + +impl GhostdagData { + pub fn new( + blue_score: u64, + blue_work: BlueWorkType, + selected_parent: Hash, + mergeset_blues: BlockHashes, + mergeset_reds: BlockHashes, + blues_anticone_sizes: HashKTypeMap, + ) -> Self { + Self { + blue_score, + blue_work, + selected_parent, + mergeset_blues, + mergeset_reds, + blues_anticone_sizes, + } + } + + pub fn new_with_selected_parent(selected_parent: Hash, k: KType) -> Self { + let mut mergeset_blues: Vec = Vec::with_capacity((k + 1) as usize); + let mut blues_anticone_sizes: BlockHashMap = BlockHashMap::with_capacity(k as usize); + mergeset_blues.push(selected_parent); + blues_anticone_sizes.insert(selected_parent, 0); + + Self { + blue_score: Default::default(), + blue_work: Default::default(), + selected_parent, + mergeset_blues: BlockHashes::new(mergeset_blues), + mergeset_reds: Default::default(), + blues_anticone_sizes: HashKTypeMap::new(blues_anticone_sizes), + } + } + + pub fn mergeset_size(&self) -> usize { + self.mergeset_blues.len() + self.mergeset_reds.len() + } + + /// Returns an iterator to the mergeset with no specified order (excluding the selected parent) + pub fn unordered_mergeset_without_selected_parent(&self) -> impl Iterator + '_ { + self.mergeset_blues + .iter() + .skip(1) // Skip the selected parent + .cloned() + .chain(self.mergeset_reds.iter().cloned()) + } + + /// Returns an iterator to the mergeset with no specified order (including the selected parent) + pub fn unordered_mergeset(&self) -> impl Iterator + '_ { + self.mergeset_blues + .iter() + .cloned() + .chain(self.mergeset_reds.iter().cloned()) + } + + pub fn to_compact(&self) -> CompactGhostdagData { + CompactGhostdagData { + blue_score: self.blue_score, + blue_work: self.blue_work, + selected_parent: self.selected_parent, + } + } + + pub fn add_blue( + &mut self, + block: Hash, + blue_anticone_size: KType, + block_blues_anticone_sizes: &BlockHashMap, + ) { + // Add the new blue block to mergeset blues + BlockHashes::make_mut(&mut self.mergeset_blues).push(block); + + // Get a mut ref to internal anticone size map + let blues_anticone_sizes = HashKTypeMap::make_mut(&mut self.blues_anticone_sizes); + + // Insert the new blue block with its blue anticone size to the map + blues_anticone_sizes.insert(block, blue_anticone_size); + + // Insert/update map entries for blocks affected by this insertion + for (blue, size) in block_blues_anticone_sizes { + blues_anticone_sizes.insert(*blue, size + 1); + } 
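+        // (Each key in `block_blues_anticone_sizes` is a blue block that now also
+        // has the newly added `block` in its anticone, hence the `size + 1`.)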
+ } + + pub fn add_red(&mut self, block: Hash) { + // Add the new red block to mergeset reds + BlockHashes::make_mut(&mut self.mergeset_reds).push(block); + } + + pub fn finalize_score_and_work(&mut self, blue_score: u64, blue_work: BlueWorkType) { + self.blue_score = blue_score; + self.blue_work = blue_work; + } +} diff --git a/types/src/header.rs b/types/src/header.rs new file mode 100644 index 0000000000..95f353411f --- /dev/null +++ b/types/src/header.rs @@ -0,0 +1,60 @@ +use crate::block::BlockHeader; +use crate::blockhash::{BlockLevel, ORIGIN}; +use crate::U256; +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; +use std::sync::Arc; + +pub trait ConsensusHeader { + fn parents_hash(&self) -> &[Hash]; + fn difficulty(&self) -> U256; + fn hash(&self) -> Hash; + fn timestamp(&self) -> u64; +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct Header { + block_header: BlockHeader, + parents_hash: Vec, +} + +impl Header { + pub fn new(block_header: BlockHeader, parents_hash: Vec) -> Self { + Self { + block_header, + parents_hash, + } + } + + pub fn genesis_hash(&self) -> Hash { + Hash::new(ORIGIN) + } +} + +impl ConsensusHeader for Header { + fn parents_hash(&self) -> &[Hash] { + &self.parents_hash + } + fn difficulty(&self) -> U256 { + self.block_header.difficulty() + } + fn hash(&self) -> Hash { + self.block_header.id() + } + + fn timestamp(&self) -> u64 { + self.block_header.timestamp() + } +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct HeaderWithBlockLevel { + pub header: Arc
, + pub block_level: BlockLevel, +} + +#[derive(Clone, Copy, Serialize, Deserialize)] +pub struct CompactHeaderData { + pub timestamp: u64, + pub difficulty: U256, +} diff --git a/types/src/interval.rs b/types/src/interval.rs new file mode 100644 index 0000000000..6b09f68b31 --- /dev/null +++ b/types/src/interval.rs @@ -0,0 +1,361 @@ +use serde::{Deserialize, Serialize}; +use std::fmt::{Display, Formatter}; + +#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +pub struct Interval { + pub start: u64, + pub end: u64, +} + +impl Display for Interval { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "[{}, {}]", self.start, self.end) + } +} + +impl From for (u64, u64) { + fn from(val: Interval) -> Self { + (val.start, val.end) + } +} + +impl Interval { + pub fn new(start: u64, end: u64) -> Self { + debug_assert!(end >= start - 1); // TODO: make sure this is actually debug-only + debug_assert!(start > 0); + debug_assert!(end < u64::MAX); + Interval { start, end } + } + + pub fn empty() -> Self { + Self::new(1, 0) + } + + /// Returns the maximally allowed `u64` interval. We leave a margin of 1 from + /// both `u64` bounds (`0` and `u64::MAX`) in order to support the reduction of any + /// legal interval to an empty one by setting `end = start - 1` or `start = end + 1` + pub fn maximal() -> Self { + Self::new(1, u64::MAX - 1) + } + + pub fn size(&self) -> u64 { + // Empty intervals are indicated by `self.end == self.start - 1`, so + // we avoid the overflow by first adding 1 + // Note: this function will panic if `self.end < self.start - 1` due to overflow + (self.end + 1) - self.start + } + + pub fn is_empty(&self) -> bool { + self.size() == 0 + } + + pub fn increase(&self, offset: u64) -> Self { + Self::new(self.start + offset, self.end + offset) + } + + pub fn decrease(&self, offset: u64) -> Self { + Self::new(self.start - offset, self.end - offset) + } + + pub fn increase_start(&self, offset: u64) -> Self { + Self::new(self.start + offset, self.end) + } + + pub fn decrease_start(&self, offset: u64) -> Self { + Self::new(self.start - offset, self.end) + } + + pub fn increase_end(&self, offset: u64) -> Self { + Self::new(self.start, self.end + offset) + } + + pub fn decrease_end(&self, offset: u64) -> Self { + Self::new(self.start, self.end - offset) + } + + pub fn split_half(&self) -> (Self, Self) { + self.split_fraction(0.5) + } + + /// Splits this interval to two parts such that their + /// union is equal to the original interval and the first (left) part + /// contains the given fraction of the original interval's size. + /// Note: if the split results in fractional parts, this method rounds + /// the first part up and the last part down. + fn split_fraction(&self, fraction: f32) -> (Self, Self) { + let left_size = f32::ceil(self.size() as f32 * fraction) as u64; + + ( + Self::new(self.start, self.start + left_size - 1), + Self::new(self.start + left_size, self.end), + ) + } + + /// Splits this interval to exactly |sizes| parts where + /// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly + /// equal to the interval's size. 
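+    /// For example (mirroring `test_split_exact` below), splitting [1, 50] with
+    /// sizes [5, 10, 15, 20] yields [1, 5], [6, 15], [16, 30] and [31, 50].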
+ pub fn split_exact(&self, sizes: &[u64]) -> Vec { + assert_eq!( + sizes.iter().sum::(), + self.size(), + "sum of sizes must be equal to the interval's size" + ); + let mut start = self.start; + sizes + .iter() + .map(|size| { + let interval = Self::new(start, start + size - 1); + start += size; + interval + }) + .collect() + } + + /// Splits this interval to |sizes| parts + /// by the allocation rule described below. This method expects sum(sizes) + /// to be smaller or equal to the interval's size. Every part_i is + /// allocated at least sizes[i] capacity. The remaining budget is + /// split by an exponentially biased rule described below. + /// + /// This rule follows the GHOSTDAG protocol behavior where the child + /// with the largest subtree is expected to dominate the competition + /// for new blocks and thus grow the most. However, we may need to + /// add slack for non-largest subtrees in order to make CPU reindexing + /// attacks unworthy. + pub fn split_exponential(&self, sizes: &[u64]) -> Vec { + let interval_size = self.size(); + let sizes_sum = sizes.iter().sum::(); + assert!( + interval_size >= sizes_sum, + "interval's size must be greater than or equal to sum of sizes" + ); + assert!(sizes_sum > 0, "cannot split to 0 parts"); + if interval_size == sizes_sum { + return self.split_exact(sizes); + } + + // + // Add a fractional bias to every size in the provided sizes + // + + let mut remaining_bias = interval_size - sizes_sum; + let total_bias = remaining_bias as f64; + + let mut biased_sizes = Vec::::with_capacity(sizes.len()); + let exp_fractions = exponential_fractions(sizes); + for (i, fraction) in exp_fractions.iter().enumerate() { + let bias: u64 = if i == exp_fractions.len() - 1 { + remaining_bias + } else { + remaining_bias.min(f64::round(total_bias * fraction) as u64) + }; + biased_sizes.push(sizes[i] + bias); + remaining_bias -= bias; + } + + self.split_exact(biased_sizes.as_slice()) + } + + pub fn contains(&self, other: Self) -> bool { + self.start <= other.start && other.end <= self.end + } + + pub fn strictly_contains(&self, other: Self) -> bool { + self.start <= other.start && other.end < self.end + } +} + +/// Returns a fraction for each size in sizes +/// as follows: +/// fraction[i] = 2^size[i] / sum_j(2^size[j]) +/// In the code below the above equation is divided by 2^max(size) +/// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i]) +/// we divide 1 by potentially a very large number, which will +/// result in loss of float precision. This is not a problem - all +/// numbers close to 0 bear effectively the same weight. 
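+/// For instance, `exponential_fractions(&[0, 0])` yields `[0.5, 0.5]`, as asserted
+/// in `test_exponential_fractions` below.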
+fn exponential_fractions(sizes: &[u64]) -> Vec { + let max_size = sizes.iter().copied().max().unwrap_or_default(); + + let mut fractions = sizes + .iter() + .map(|s| 1f64 / 2f64.powf((max_size - s) as f64)) + .collect::>(); + + let fractions_sum = fractions.iter().sum::(); + for item in &mut fractions { + *item /= fractions_sum; + } + + fractions +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_interval_basics() { + let interval = Interval::new(101, 164); + let increased = interval.increase(10); + let decreased = increased.decrease(5); + // println!("{}", interval.clone()); + + assert_eq!(interval.start + 10, increased.start); + assert_eq!(interval.end + 10, increased.end); + + assert_eq!(interval.start + 5, decreased.start); + assert_eq!(interval.end + 5, decreased.end); + + assert_eq!(interval.size(), 64); + assert_eq!(Interval::maximal().size(), u64::MAX - 1); + assert_eq!(Interval::empty().size(), 0); + + let (empty_left, empty_right) = Interval::empty().split_half(); + assert_eq!(empty_left.size(), 0); + assert_eq!(empty_right.size(), 0); + + assert_eq!(interval.start + 10, interval.increase_start(10).start); + assert_eq!(interval.start - 10, interval.decrease_start(10).start); + assert_eq!(interval.end + 10, interval.increase_end(10).end); + assert_eq!(interval.end - 10, interval.decrease_end(10).end); + + assert_eq!(interval.end, interval.increase_start(10).end); + assert_eq!(interval.end, interval.decrease_start(10).end); + assert_eq!(interval.start, interval.increase_end(10).start); + assert_eq!(interval.start, interval.decrease_end(10).start); + + // println!("{:?}", Interval::maximal()); + // println!("{:?}", Interval::maximal().split_half()); + } + + #[test] + fn test_split_exact() { + let sizes = vec![5u64, 10, 15, 20]; + let intervals = Interval::new(1, 50).split_exact(sizes.as_slice()); + assert_eq!(intervals.len(), sizes.len()); + for i in 0..sizes.len() { + assert_eq!(intervals[i].size(), sizes[i]) + } + } + + #[test] + fn test_exponential_fractions() { + let mut exp_fractions = exponential_fractions(vec![2, 4, 8, 16].as_slice()); + // println!("{:?}", exp_fractions); + for i in 0..exp_fractions.len() - 1 { + assert!(exp_fractions[i + 1] > exp_fractions[i]); + } + + exp_fractions = exponential_fractions(vec![].as_slice()); + assert_eq!(exp_fractions.len(), 0); + + exp_fractions = exponential_fractions(vec![0, 0].as_slice()); + assert_eq!(exp_fractions.len(), 2); + assert_eq!(0.5f64, exp_fractions[0]); + assert_eq!(exp_fractions[0], exp_fractions[1]); + } + + #[test] + fn test_contains() { + assert!(Interval::new(1, 100).contains(Interval::new(1, 100))); + assert!(Interval::new(1, 100).contains(Interval::new(1, 99))); + assert!(Interval::new(1, 100).contains(Interval::new(2, 100))); + assert!(Interval::new(1, 100).contains(Interval::new(2, 99))); + assert!(!Interval::new(1, 100).contains(Interval::new(50, 150))); + assert!(!Interval::new(1, 100).contains(Interval::new(150, 160))); + } + + #[test] + fn test_split_exponential() { + struct Test { + interval: Interval, + sizes: Vec, + expected: Vec, + } + + let tests = [ + Test { + interval: Interval::new(1, 100), + sizes: vec![100u64], + expected: vec![Interval::new(1, 100)], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![50u64, 50], + expected: vec![Interval::new(1, 50), Interval::new(51, 100)], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![10u64, 20, 30, 40], + expected: vec![ + Interval::new(1, 10), + Interval::new(11, 30), + Interval::new(31, 60), + Interval::new(61, 
100), + ], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![25u64, 25], + expected: vec![Interval::new(1, 50), Interval::new(51, 100)], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![1u64, 1], + expected: vec![Interval::new(1, 50), Interval::new(51, 100)], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![33u64, 33, 33], + expected: vec![ + Interval::new(1, 33), + Interval::new(34, 66), + Interval::new(67, 100), + ], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![10u64, 15, 25], + expected: vec![ + Interval::new(1, 10), + Interval::new(11, 25), + Interval::new(26, 100), + ], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![25u64, 15, 10], + expected: vec![ + Interval::new(1, 75), + Interval::new(76, 90), + Interval::new(91, 100), + ], + }, + Test { + interval: Interval::new(1, 10_000), + sizes: vec![10u64, 10, 20], + expected: vec![ + Interval::new(1, 20), + Interval::new(21, 40), + Interval::new(41, 10_000), + ], + }, + Test { + interval: Interval::new(1, 100_000), + sizes: vec![31_000u64, 31_000, 30_001], + expected: vec![ + Interval::new(1, 35_000), + Interval::new(35_001, 69_999), + Interval::new(70_000, 100_000), + ], + }, + ]; + + for test in &tests { + assert_eq!( + test.expected, + test.interval.split_exponential(test.sizes.as_slice()) + ); + } + } +} diff --git a/types/src/lib.rs b/types/src/lib.rs index 67ac309280..f4a5488b01 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -105,3 +105,12 @@ pub mod sync_status; pub mod proof { pub use forkable_jellyfish_merkle::proof::SparseMerkleProof; } + +pub mod blockhash; +pub mod ghostdata; +pub mod header; +pub mod interval; +pub mod ordering; +pub mod perf; +pub mod reachability; +pub mod trusted; diff --git a/types/src/ordering.rs b/types/src/ordering.rs new file mode 100644 index 0000000000..1fd006defa --- /dev/null +++ b/types/src/ordering.rs @@ -0,0 +1,36 @@ +use crate::blockhash::BlueWorkType; +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; +use std::cmp::Ordering; + +#[derive(Eq, Clone, Debug, Serialize, Deserialize)] +pub struct SortableBlock { + pub hash: Hash, + pub blue_work: BlueWorkType, +} + +impl SortableBlock { + pub fn new(hash: Hash, blue_work: BlueWorkType) -> Self { + Self { hash, blue_work } + } +} + +impl PartialEq for SortableBlock { + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash + } +} + +impl PartialOrd for SortableBlock { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SortableBlock { + fn cmp(&self, other: &Self) -> Ordering { + self.blue_work + .cmp(&other.blue_work) + .then_with(|| self.hash.cmp(&other.hash)) + } +} diff --git a/types/src/perf.rs b/types/src/perf.rs new file mode 100644 index 0000000000..6da44d4cd7 --- /dev/null +++ b/types/src/perf.rs @@ -0,0 +1,51 @@ +//! +//! A module for performance critical constants which depend on consensus parameters. +//! The constants in this module should all be revisited if mainnet consensus parameters change. +//! + +/// The default target depth for reachability reindexes. +pub const DEFAULT_REINDEX_DEPTH: u64 = 100; + +/// The default slack interval used by the reachability +/// algorithm to encounter for blocks out of the selected chain. 
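+/// (`1 << 12` = 4096.)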
+pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 12; + +#[derive(Clone, Debug)] +pub struct PerfParams { + // + // Cache sizes + // + /// Preferred cache size for header-related data + pub header_data_cache_size: u64, + + /// Preferred cache size for block-body-related data which + /// is typically orders-of magnitude larger than header data + /// (Note this cannot be set to high due to severe memory consumption) + pub block_data_cache_size: u64, + + /// Preferred cache size for UTXO-related data + pub utxo_set_cache_size: u64, + + /// Preferred cache size for block-window-related data + pub block_window_cache_size: u64, + + // + // Thread-pools + // + /// Defaults to 0 which indicates using system default + /// which is typically the number of logical CPU cores + pub block_processors_num_threads: usize, + + /// Defaults to 0 which indicates using system default + /// which is typically the number of logical CPU cores + pub virtual_processor_num_threads: usize, +} + +pub const PERF_PARAMS: PerfParams = PerfParams { + header_data_cache_size: 10_000, + block_data_cache_size: 200, + utxo_set_cache_size: 10_000, + block_window_cache_size: 2000, + block_processors_num_threads: 0, + virtual_processor_num_threads: 0, +}; diff --git a/types/src/reachability.rs b/types/src/reachability.rs new file mode 100644 index 0000000000..e79d485c17 --- /dev/null +++ b/types/src/reachability.rs @@ -0,0 +1,25 @@ +use crate::{blockhash::BlockHashes, interval::Interval}; +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; +use std::sync::Arc; + +#[derive(Clone, Serialize, Deserialize)] +pub struct ReachabilityData { + pub children: BlockHashes, + pub parent: Hash, + pub interval: Interval, + pub height: u64, + pub future_covering_set: BlockHashes, +} + +impl ReachabilityData { + pub fn new(parent: Hash, interval: Interval, height: u64) -> Self { + Self { + children: Arc::new(vec![]), + parent, + interval, + height, + future_covering_set: Arc::new(vec![]), + } + } +} diff --git a/types/src/trusted.rs b/types/src/trusted.rs new file mode 100644 index 0000000000..213bfd50ef --- /dev/null +++ b/types/src/trusted.rs @@ -0,0 +1,26 @@ +use crate::blockhash::{BlockHashMap, BlueWorkType, KType}; +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; + +/// Represents semi-trusted externally provided Ghostdag data (by a network peer) +#[derive(Clone, Serialize, Deserialize)] +pub struct ExternalGhostdagData { + pub blue_score: u64, + pub blue_work: BlueWorkType, + pub selected_parent: Hash, + pub mergeset_blues: Vec, + pub mergeset_reds: Vec, + pub blues_anticone_sizes: BlockHashMap, +} + +/// Represents externally provided Ghostdag data associated with a block Hash +pub struct TrustedGhostdagData { + pub hash: Hash, + pub ghostdag: ExternalGhostdagData, +} + +impl TrustedGhostdagData { + pub fn new(hash: Hash, ghostdag: ExternalGhostdagData) -> Self { + Self { hash, ghostdag } + } +} diff --git a/vm/types/src/lib.rs b/vm/types/src/lib.rs index 79775d65b9..6afe4cff0d 100644 --- a/vm/types/src/lib.rs +++ b/vm/types/src/lib.rs @@ -5,8 +5,8 @@ mod language_storage_ext; pub mod account_address; -pub mod gas_schedule; pub mod dag_block_metadata; +pub mod gas_schedule; pub mod location { pub use move_ir_types::location::Loc; } From 3b8fe036fe5ae9982d0f631fe81ea6a7a6702bda Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 26 Jul 2023 18:54:23 +0800 Subject: [PATCH 07/30] add get accumulator leaves --- Cargo.lock | 3 +++ chain/Cargo.toml | 2 ++ chain/api/Cargo.toml | 1 + 
chain/api/src/message.rs | 2 ++ chain/service/Cargo.toml | 1 + chain/service/src/chain_service.rs | 6 ++++- chain/src/dag_chain.rs | 41 +++++++++++++++++++++++++++--- 7 files changed, 52 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3fc3be768..f6ea205c6c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9259,6 +9259,7 @@ dependencies = [ "starcoin-executor", "starcoin-genesis", "starcoin-logger", + "starcoin-network-rpc-api", "starcoin-open-block", "starcoin-resource-viewer", "starcoin-service-registry", @@ -9289,6 +9290,7 @@ dependencies = [ "serde 1.0.152", "starcoin-accumulator", "starcoin-crypto", + "starcoin-network-rpc-api", "starcoin-service-registry", "starcoin-state-api", "starcoin-statedb", @@ -9355,6 +9357,7 @@ dependencies = [ "starcoin-config", "starcoin-crypto", "starcoin-logger", + "starcoin-network-rpc-api", "starcoin-service-registry", "starcoin-state-api", "starcoin-storage", diff --git a/chain/Cargo.toml b/chain/Cargo.toml index f89cf8c8d2..53690eb5be 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -25,6 +25,7 @@ starcoin-storage = { workspace = true } thiserror = { workspace = true } dag-database = { workspace = true } dag-consensus = { workspace = true } +starcoin-network-rpc-api = { workspace = true } [dev-dependencies] proptest = { workspace = true } @@ -43,6 +44,7 @@ test-helper = { workspace = true } tokio = { features = ["full"], workspace = true } dag-database = { workspace = true } dag-consensus = { workspace = true } +starcoin-network-rpc-api = { workspace = true } [features] default = [] diff --git a/chain/api/Cargo.toml b/chain/api/Cargo.toml index 6dc36f0871..cdbd2a75d0 100644 --- a/chain/api/Cargo.toml +++ b/chain/api/Cargo.toml @@ -15,6 +15,7 @@ starcoin-time-service = { workspace = true } starcoin-types = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } +starcoin-network-rpc-api = { workspace = true } [dev-dependencies] diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs index 04b91ba088..0ffbee0117 100644 --- a/chain/api/src/message.rs +++ b/chain/api/src/message.rs @@ -4,6 +4,7 @@ use crate::TransactionInfoWithProof; use anyhow::Result; use starcoin_crypto::HashValue; +use starcoin_network_rpc_api::dag_protocol::TargetDagAccumulatorLeaf; use starcoin_service_registry::ServiceRequest; use starcoin_types::transaction::RichTransactionInfo; use starcoin_types::{ @@ -92,4 +93,5 @@ pub enum ChainResponse { HashVec(Vec), TransactionProof(Box>), BlockInfoVec(Box>>), + TargetDagAccumulatorLeaf(Vec), } diff --git a/chain/service/Cargo.toml b/chain/service/Cargo.toml index 301d677160..2a2e1d4f50 100644 --- a/chain/service/Cargo.toml +++ b/chain/service/Cargo.toml @@ -18,6 +18,7 @@ starcoin-vm-runtime = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } +starcoin-network-rpc-api = { workspace = true } [dev-dependencies] stest = { workspace = true } diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index f1bfb65eeb..5d930d673f 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -11,6 +11,7 @@ use starcoin_chain_api::{ use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; +use starcoin_network_rpc_api::dag_protocol::GetDagAccumulatorLeaves; use starcoin_service_registry::{ ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, }; @@ -241,7 +242,10 @@ impl ServiceHandler 
for ChainReaderService { ChainRequest::GetBlockInfos(ids) => Ok(ChainResponse::BlockInfoVec(Box::new( self.inner.get_block_infos(ids)?, ))), - _ => todo!(), // ChainRequest::GetDagAccumulatorLeaves(start_index, batch_size) => Ok(ChainResponse::HashValue(self.dag_)), + ChainRequest::GetDagAccumulatorLeaves{start_index, batch_size} => Ok(ChainResponse::TargetDagAccumulatorLeaf(self.dag_chain.get_accumulator_leaves(GetDagAccumulatorLeaves { + accumulator_leaf_index: start_index, + batch_size, + })?)), } } } diff --git a/chain/src/dag_chain.rs b/chain/src/dag_chain.rs index c660a7ab9d..20b4164949 100644 --- a/chain/src/dag_chain.rs +++ b/chain/src/dag_chain.rs @@ -3,18 +3,21 @@ use std::sync::Arc; use anyhow::bail; use dag_consensus::blockdag::BlockDAG; use dag_database::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; +use starcoin_accumulator::Accumulator; use starcoin_accumulator::{node::AccumulatorStoreType, MerkleAccumulator}; use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_executor::VMMetrics; +use starcoin_storage::storage::CodecKVStore; use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, Store}; use starcoin_types::block::BlockHeader; use starcoin_types::{blockhash::ORIGIN, header::Header}; +use starcoin_network_rpc_api::dag_protocol::{TargetDagAccumulatorLeaf, GetDagAccumulatorLeaves}; pub struct DagBlockChain { dag: Option, dag_sync_accumulator: MerkleAccumulator, - sync_accumulator_snapshot: Arc, + dag_sync_accumulator_snapshot: Arc, } impl DagBlockChain { @@ -42,7 +45,7 @@ impl DagBlockChain { dag_sync_accumulator: MerkleAccumulator::new_empty( storage.get_accumulator_store(AccumulatorStoreType::SyncDag), ), - sync_accumulator_snapshot: storage.get_accumulator_snapshot_storage(), + dag_sync_accumulator_snapshot: storage.get_accumulator_snapshot_storage(), }) } }; @@ -62,7 +65,39 @@ impl DagBlockChain { accumulator_info, storage.get_accumulator_store(AccumulatorStoreType::SyncDag), ), - sync_accumulator_snapshot: storage.get_accumulator_snapshot_storage(), + dag_sync_accumulator_snapshot: storage.get_accumulator_snapshot_storage(), }) } + + pub fn get_accumulator_leaves(&self, req: GetDagAccumulatorLeaves) -> anyhow::Result> { + match self.dag_sync_accumulator.get_leaves(req.accumulator_leaf_index, true, req.batch_size) { + Ok(leaves) => Ok(leaves + .into_iter() + .enumerate() + .map( + |(index, leaf)| match self.dag_sync_accumulator_snapshot.get(leaf) { + Ok(op_snapshot) => { + let snapshot = op_snapshot.expect("snapshot must exist"); + TargetDagAccumulatorLeaf { + accumulator_root: snapshot.accumulator_info.accumulator_root, + leaf_index: req.accumulator_leaf_index.saturating_sub(index as u64), + } + } + Err(error) => { + panic!( + "error occured when query the accumulator snapshot: {}", + error.to_string() + ); + } + }, + ) + .collect()), + Err(error) => { + bail!( + "an error occured when getting the leaves of the accumulator, {}", + error.to_string() + ); + } + } + } } From e400a9a0ce6ba28fe9370db8657cc4088fcdaaab Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 26 Jul 2023 19:27:09 +0800 Subject: [PATCH 08/30] add gene::server to get the dag accumulator leaves --- chain/api/src/message.rs | 9 +++- chain/api/src/service.rs | 15 +++++++ chain/service/src/chain_service.rs | 24 +++++++++-- chain/src/dag_chain.rs | 69 ++++++++++++++++++++++++++++-- network-rpc/src/rpc.rs | 8 +++- 5 files changed, 114 insertions(+), 11 deletions(-) diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs index 0ffbee0117..9314e99a39 100644 --- 
a/chain/api/src/message.rs +++ b/chain/api/src/message.rs @@ -4,7 +4,9 @@ use crate::TransactionInfoWithProof; use anyhow::Result; use starcoin_crypto::HashValue; -use starcoin_network_rpc_api::dag_protocol::TargetDagAccumulatorLeaf; +use starcoin_network_rpc_api::dag_protocol::{ + TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail, +}; use starcoin_service_registry::ServiceRequest; use starcoin_types::transaction::RichTransactionInfo; use starcoin_types::{ @@ -65,6 +67,10 @@ pub enum ChainRequest { start_index: u64, batch_size: u64, }, + GetTargetDagAccumulatorLeafDetail { + leaf_index: u64, + batch_size: u64, + }, } impl ServiceRequest for ChainRequest { @@ -94,4 +100,5 @@ pub enum ChainResponse { TransactionProof(Box>), BlockInfoVec(Box>>), TargetDagAccumulatorLeaf(Vec), + TargetDagAccumulatorLeafDetail(Vec), } diff --git a/chain/api/src/service.rs b/chain/api/src/service.rs index 8ba6adce0e..55b441a946 100644 --- a/chain/api/src/service.rs +++ b/chain/api/src/service.rs @@ -5,6 +5,7 @@ use crate::message::{ChainRequest, ChainResponse}; use crate::TransactionInfoWithProof; use anyhow::{bail, Result}; use starcoin_crypto::HashValue; +use starcoin_network_rpc_api::dag_protocol; use starcoin_service_registry::{ActorService, ServiceHandler, ServiceRef}; use starcoin_types::contract_event::{ContractEvent, ContractEventInfo}; use starcoin_types::filter::Filter; @@ -139,6 +140,7 @@ pub trait ChainAsyncService: ) -> Result>; async fn get_block_infos(&self, hashes: Vec) -> Result>>; + async fn get_dag_accumulator_leaves(&self, req: dag_protocol::GetDagAccumulatorLeaves) -> Result>; } #[async_trait::async_trait] @@ -180,6 +182,19 @@ where } } + async fn get_dag_accumulator_leaves(&self, req: dag_protocol::GetDagAccumulatorLeaves) -> Result> { + if let ChainResponse::TargetDagAccumulatorLeaf(leaves) = + self.send(ChainRequest::GetDagAccumulatorLeaves { + start_index: req.accumulator_leaf_index, + batch_size: req.batch_size, + }).await?? + { + Ok(leaves) + } else { + bail!("get_blocks response type error.") + } + } + async fn get_headers(&self, ids: Vec) -> Result>> { if let ChainResponse::BlockHeaderVec(headers) = self.send(ChainRequest::GetHeaders(ids)).await?? 
diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index 5d930d673f..6e0db0c4b9 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -11,7 +11,7 @@ use starcoin_chain_api::{ use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; -use starcoin_network_rpc_api::dag_protocol::GetDagAccumulatorLeaves; +use starcoin_network_rpc_api::dag_protocol::{GetDagAccumulatorLeaves, GetTargetDagAccumulatorLeafDetail}; use starcoin_service_registry::{ ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, }; @@ -242,10 +242,26 @@ impl ServiceHandler for ChainReaderService { ChainRequest::GetBlockInfos(ids) => Ok(ChainResponse::BlockInfoVec(Box::new( self.inner.get_block_infos(ids)?, ))), - ChainRequest::GetDagAccumulatorLeaves{start_index, batch_size} => Ok(ChainResponse::TargetDagAccumulatorLeaf(self.dag_chain.get_accumulator_leaves(GetDagAccumulatorLeaves { - accumulator_leaf_index: start_index, + ChainRequest::GetDagAccumulatorLeaves { + start_index, + batch_size, + } => Ok(ChainResponse::TargetDagAccumulatorLeaf( + self.dag_chain + .get_accumulator_leaves(GetDagAccumulatorLeaves { + accumulator_leaf_index: start_index, + batch_size, + })?, + )), + ChainRequest::GetTargetDagAccumulatorLeafDetail { + leaf_index, batch_size, - })?)), + } => Ok(ChainResponse::TargetDagAccumulatorLeafDetail( + self.dag_chain + .get_target_dag_accumulator_leaf_detail(GetTargetDagAccumulatorLeafDetail { + leaf_index, + batch_size, + })?, + )), } } } diff --git a/chain/src/dag_chain.rs b/chain/src/dag_chain.rs index 20b4164949..042c059b55 100644 --- a/chain/src/dag_chain.rs +++ b/chain/src/dag_chain.rs @@ -8,11 +8,13 @@ use starcoin_accumulator::{node::AccumulatorStoreType, MerkleAccumulator}; use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_executor::VMMetrics; +use starcoin_network_rpc_api::dag_protocol::{ + GetDagAccumulatorLeaves, TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail, GetTargetDagAccumulatorLeafDetail, RelationshipPair, +}; use starcoin_storage::storage::CodecKVStore; use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, Store}; use starcoin_types::block::BlockHeader; use starcoin_types::{blockhash::ORIGIN, header::Header}; -use starcoin_network_rpc_api::dag_protocol::{TargetDagAccumulatorLeaf, GetDagAccumulatorLeaves}; pub struct DagBlockChain { dag: Option, @@ -24,7 +26,7 @@ impl DagBlockChain { pub fn new( config: Arc, storage: Arc, - vm_metrics: Option, + _vm_metrics: Option, ) -> anyhow::Result { // initialize the dag let db_path = config.storage.dir(); @@ -69,8 +71,17 @@ impl DagBlockChain { }) } - pub fn get_accumulator_leaves(&self, req: GetDagAccumulatorLeaves) -> anyhow::Result> { - match self.dag_sync_accumulator.get_leaves(req.accumulator_leaf_index, true, req.batch_size) { + pub fn get_accumulator_leaves( + &self, + req: GetDagAccumulatorLeaves, + ) -> anyhow::Result> { + if self.dag.is_none() { + bail!("the dag is None"); + } + match self + .dag_sync_accumulator + .get_leaves(req.accumulator_leaf_index, true, req.batch_size) + { Ok(leaves) => Ok(leaves .into_iter() .enumerate() @@ -100,4 +111,54 @@ impl DagBlockChain { } } } + + pub fn get_target_dag_accumulator_leaf_detail( + &self, + req: GetTargetDagAccumulatorLeafDetail, + ) -> anyhow::Result> { + let dag = if self.dag.is_some() { + self.dag.as_ref().unwrap() + } else { + bail!("the dag is None"); + }; + let end_index = std::cmp::min( + req.leaf_index + 
req.batch_size - 1, + self.dag_sync_accumulator.get_info().num_leaves - 1, + ); + let mut details = [].to_vec(); + for index in req.leaf_index..=end_index { + let leaf_hash = self + .dag_sync_accumulator + .get_leaf(index) + .unwrap_or(None) + .expect("leaf hash should not be None"); + let snapshot = self + .dag_sync_accumulator_snapshot + .get(leaf_hash) + .unwrap_or(None) + .expect("the snapshot should not be None"); + let mut relationship_pair = [].to_vec(); + relationship_pair.extend( + snapshot + .child_hashes + .into_iter() + .fold([].to_vec(), |mut pairs, child| { + let parents = dag + .get_parents(child) + .expect("a child must have parents"); + parents.into_iter().for_each(|parent| { + pairs.push(RelationshipPair { parent, child }); + }); + pairs + }) + .into_iter(), + ); + + details.push(TargetDagAccumulatorLeafDetail { + accumulator_root: snapshot.accumulator_info.accumulator_root, + relationship_pair, + }); + } + Ok(details) + } } diff --git a/network-rpc/src/rpc.rs b/network-rpc/src/rpc.rs index d28ea6ed38..e67c0578b5 100644 --- a/network-rpc/src/rpc.rs +++ b/network-rpc/src/rpc.rs @@ -310,9 +310,13 @@ impl gen_server::NetworkRpc for NetworkRpcImpl { fn get_dag_accumulator_leaves( &self, _peer_id: PeerId, - _req: dag_protocol::GetDagAccumulatorLeaves, + req: dag_protocol::GetDagAccumulatorLeaves, ) -> BoxFuture>> { - todo!() + let chain_service = self.chain_service.clone(); + let fut = async move { + chain_service.get_dag_accumulator_leaves(req).await + }; + Box::pin(fut) } fn get_accumulator_leaf_detail( From 01ff8fe8b4a4b8d06831c012e8b22d732dd8d6e8 Mon Sep 17 00:00:00 2001 From: simonjiao Date: Wed, 26 Jul 2023 18:29:46 +0800 Subject: [PATCH 09/30] move database to consensusdb --- .../src/consensusdb}/access.rs | 0 .../src/consensusdb}/cache/mod.rs | 0 .../src/consensusdb}/cache/stc_cache.rs | 0 .../src/consensusdb}/consensus_ghostdag.rs | 0 .../src/consensusdb}/consensus_header.rs | 0 .../consensusdb}/consensus_reachability.rs | 0 .../src/consensusdb}/consensus_relations.rs | 0 .../src => consensus/src/consensusdb}/db.rs | 0 .../src/consensusdb}/errors.rs | 0 .../src => consensus/src/consensusdb}/item.rs | 0 .../src => consensus/src/consensusdb}/lib.rs | 0 .../src/consensusdb}/writer.rs | 0 storage/dag-database/Cargo.toml | 23 ------------------- 13 files changed, 23 deletions(-) rename {storage/dag-database/src => consensus/src/consensusdb}/access.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/cache/mod.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/cache/stc_cache.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/consensus_ghostdag.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/consensus_header.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/consensus_reachability.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/consensus_relations.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/db.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/errors.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/item.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/lib.rs (100%) rename {storage/dag-database/src => consensus/src/consensusdb}/writer.rs (100%) delete mode 100644 storage/dag-database/Cargo.toml diff --git a/storage/dag-database/src/access.rs b/consensus/src/consensusdb/access.rs similarity index 100% rename from 
storage/dag-database/src/access.rs rename to consensus/src/consensusdb/access.rs diff --git a/storage/dag-database/src/cache/mod.rs b/consensus/src/consensusdb/cache/mod.rs similarity index 100% rename from storage/dag-database/src/cache/mod.rs rename to consensus/src/consensusdb/cache/mod.rs diff --git a/storage/dag-database/src/cache/stc_cache.rs b/consensus/src/consensusdb/cache/stc_cache.rs similarity index 100% rename from storage/dag-database/src/cache/stc_cache.rs rename to consensus/src/consensusdb/cache/stc_cache.rs diff --git a/storage/dag-database/src/consensus_ghostdag.rs b/consensus/src/consensusdb/consensus_ghostdag.rs similarity index 100% rename from storage/dag-database/src/consensus_ghostdag.rs rename to consensus/src/consensusdb/consensus_ghostdag.rs diff --git a/storage/dag-database/src/consensus_header.rs b/consensus/src/consensusdb/consensus_header.rs similarity index 100% rename from storage/dag-database/src/consensus_header.rs rename to consensus/src/consensusdb/consensus_header.rs diff --git a/storage/dag-database/src/consensus_reachability.rs b/consensus/src/consensusdb/consensus_reachability.rs similarity index 100% rename from storage/dag-database/src/consensus_reachability.rs rename to consensus/src/consensusdb/consensus_reachability.rs diff --git a/storage/dag-database/src/consensus_relations.rs b/consensus/src/consensusdb/consensus_relations.rs similarity index 100% rename from storage/dag-database/src/consensus_relations.rs rename to consensus/src/consensusdb/consensus_relations.rs diff --git a/storage/dag-database/src/db.rs b/consensus/src/consensusdb/db.rs similarity index 100% rename from storage/dag-database/src/db.rs rename to consensus/src/consensusdb/db.rs diff --git a/storage/dag-database/src/errors.rs b/consensus/src/consensusdb/errors.rs similarity index 100% rename from storage/dag-database/src/errors.rs rename to consensus/src/consensusdb/errors.rs diff --git a/storage/dag-database/src/item.rs b/consensus/src/consensusdb/item.rs similarity index 100% rename from storage/dag-database/src/item.rs rename to consensus/src/consensusdb/item.rs diff --git a/storage/dag-database/src/lib.rs b/consensus/src/consensusdb/lib.rs similarity index 100% rename from storage/dag-database/src/lib.rs rename to consensus/src/consensusdb/lib.rs diff --git a/storage/dag-database/src/writer.rs b/consensus/src/consensusdb/writer.rs similarity index 100% rename from storage/dag-database/src/writer.rs rename to consensus/src/consensusdb/writer.rs diff --git a/storage/dag-database/Cargo.toml b/storage/dag-database/Cargo.toml deleted file mode 100644 index dbc41e5a69..0000000000 --- a/storage/dag-database/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "dag-database" -version = "1.13.5" -edition.workspace = true - -[dependencies] -starcoin-storage.workspace = true -starcoin-config.workspace = true -starcoin-crypto.workspace = true -starcoin-types.workspace = true -rocksdb.workspace = true -serde.workspace = true -bincode.workspace = true -indexmap.workspace = true -parking_lot.workspace = true -thiserror.workspace = true -rand.workspace = true -faster-hex.workspace = true -itertools.workspace = true -num_cpus.workspace = true - -[dev-dependencies] -tempfile.workspace = true From ac8739822b569fbb54442a67692306b1c1685460 Mon Sep 17 00:00:00 2001 From: simonjiao Date: Wed, 26 Jul 2023 19:20:18 +0800 Subject: [PATCH 10/30] make consensus module buildable --- Cargo.toml | 1 - chain/Cargo.toml | 2 -- consensus/Cargo.toml | 7 +++++++ consensus/src/consensusdb/access.rs 
| 2 +- consensus/src/consensusdb/consensus_ghostdag.rs | 2 +- consensus/src/consensusdb/consensus_header.rs | 2 +- consensus/src/consensusdb/consensus_reachability.rs | 2 +- consensus/src/consensusdb/consensus_relations.rs | 2 +- consensus/src/consensusdb/db.rs | 4 ++-- consensus/src/consensusdb/item.rs | 2 +- consensus/src/consensusdb/{lib.rs => mod.rs} | 2 +- consensus/src/consensusdb/writer.rs | 2 +- consensus/src/lib.rs | 1 + 13 files changed, 18 insertions(+), 13 deletions(-) rename consensus/src/consensusdb/{lib.rs => mod.rs} (95%) diff --git a/Cargo.toml b/Cargo.toml index 0c3fd21478..e654e651c0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -516,7 +516,6 @@ walkdir = "2.3.1" wasm-timer = "0.2" which = "4.1.0" zeroize = "1.3.0" -dag-database = { path = "storage/dag-database" } dag-consensus = { path = "consensus/dag-consensus" } ghostdag = { path = "consensus/dag-consensus/ghostdag" } reachability = { path = "consensus/dag-consensus/reachability" } diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 53690eb5be..c9b563639b 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -23,7 +23,6 @@ starcoin-types = { package = "starcoin-types", workspace = true } starcoin-vm-types = { workspace = true } starcoin-storage = { workspace = true } thiserror = { workspace = true } -dag-database = { workspace = true } dag-consensus = { workspace = true } starcoin-network-rpc-api = { workspace = true } @@ -42,7 +41,6 @@ stdlib = { workspace = true } stest = { workspace = true } test-helper = { workspace = true } tokio = { features = ["full"], workspace = true } -dag-database = { workspace = true } dag-consensus = { workspace = true } starcoin-network-rpc-api = { workspace = true } diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index b466d4536b..bcba969dd2 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -19,6 +19,13 @@ starcoin-time-service = { workspace = true } starcoin-types = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } +rocksdb = { workspace = true } +bincode = { workspace = true } +serde = { workspace = true } +starcoin-storage = { workspace = true } +parking_lot = { workspace = true } +itertools = { workspace = true } +starcoin-config = { workspace = true } [dev-dependencies] proptest = { workspace = true } diff --git a/consensus/src/consensusdb/access.rs b/consensus/src/consensusdb/access.rs index 3e074cb190..999da966ca 100644 --- a/consensus/src/consensusdb/access.rs +++ b/consensus/src/consensusdb/access.rs @@ -1,4 +1,4 @@ -use crate::{cache::DagCache, db::DBStorage, errors::StoreError}; +use super::{cache::DagCache, db::DBStorage, errors::StoreError}; use super::prelude::{Cache, DbWriter}; use itertools::Itertools; diff --git a/consensus/src/consensusdb/consensus_ghostdag.rs b/consensus/src/consensusdb/consensus_ghostdag.rs index 63d6b20dbd..a67efe69f0 100644 --- a/consensus/src/consensusdb/consensus_ghostdag.rs +++ b/consensus/src/consensusdb/consensus_ghostdag.rs @@ -1,4 +1,4 @@ -use crate::{ +use super::{ db::DBStorage, errors::StoreError, prelude::{CachedDbAccess, DirectDbWriter}, diff --git a/consensus/src/consensusdb/consensus_header.rs b/consensus/src/consensusdb/consensus_header.rs index 75f09fb6c1..97c3d49a98 100644 --- a/consensus/src/consensusdb/consensus_header.rs +++ b/consensus/src/consensusdb/consensus_header.rs @@ -1,4 +1,4 @@ -use crate::{ +use super::{ db::DBStorage, errors::{StoreError, StoreResult}, prelude::CachedDbAccess, diff --git a/consensus/src/consensusdb/consensus_reachability.rs 
b/consensus/src/consensusdb/consensus_reachability.rs index dfc3fd9b7a..cdaf2d37dc 100644 --- a/consensus/src/consensusdb/consensus_reachability.rs +++ b/consensus/src/consensusdb/consensus_reachability.rs @@ -1,4 +1,4 @@ -use crate::{ +use super::{ db::DBStorage, prelude::{BatchDbWriter, CachedDbAccess, CachedDbItem, DirectDbWriter, StoreError}, }; diff --git a/consensus/src/consensusdb/consensus_relations.rs b/consensus/src/consensusdb/consensus_relations.rs index f2f5b20996..9d55644009 100644 --- a/consensus/src/consensusdb/consensus_relations.rs +++ b/consensus/src/consensusdb/consensus_relations.rs @@ -1,4 +1,4 @@ -use crate::{ +use super::{ db::DBStorage, prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter, StoreError}, }; diff --git a/consensus/src/consensusdb/db.rs b/consensus/src/consensusdb/db.rs index 55a89d2e46..0728f17e32 100644 --- a/consensus/src/consensusdb/db.rs +++ b/consensus/src/consensusdb/db.rs @@ -1,9 +1,9 @@ -use crate::consensus::{ +use super::consensus::{ DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, HEADERS_STORE_CF, PARENTS_CF, REACHABILITY_DATA_CF, }; -use crate::errors::StoreError; +use super::errors::StoreError; use starcoin_config::RocksdbConfig; pub(crate) use starcoin_storage::db_storage::DBStorage; use std::{path::Path, sync::Arc}; diff --git a/consensus/src/consensusdb/item.rs b/consensus/src/consensusdb/item.rs index 570035810e..14cff7469b 100644 --- a/consensus/src/consensusdb/item.rs +++ b/consensus/src/consensusdb/item.rs @@ -1,4 +1,4 @@ -use crate::{db::DBStorage, errors::StoreError}; +use super::{db::DBStorage, errors::StoreError}; use super::prelude::DbWriter; use parking_lot::RwLock; diff --git a/consensus/src/consensusdb/lib.rs b/consensus/src/consensusdb/mod.rs similarity index 95% rename from consensus/src/consensusdb/lib.rs rename to consensus/src/consensusdb/mod.rs index 1137b8b86c..f3d77a61e4 100644 --- a/consensus/src/consensusdb/lib.rs +++ b/consensus/src/consensusdb/mod.rs @@ -10,7 +10,7 @@ mod item; mod writer; pub mod prelude { - use crate::{db, errors}; + use super::{db, errors}; pub use super::{ access::CachedDbAccess, diff --git a/consensus/src/consensusdb/writer.rs b/consensus/src/consensusdb/writer.rs index ee66447642..0d692c2859 100644 --- a/consensus/src/consensusdb/writer.rs +++ b/consensus/src/consensusdb/writer.rs @@ -1,7 +1,7 @@ use rocksdb::WriteBatch; use starcoin_storage::storage::InnerStore; -use crate::{db::DBStorage, errors::StoreError}; +use super::{db::DBStorage, errors::StoreError}; /// Abstraction over direct/batched DB writing pub trait DbWriter { diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 8b870c6d2e..3c00eb74e9 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -23,6 +23,7 @@ pub mod cn; mod consensus; #[cfg(test)] mod consensus_test; +pub mod consensusdb; pub mod difficulty; pub mod dummy; pub mod keccak; From 397a7e9f7925849e8012d335225b47ad0ff0e4a4 Mon Sep 17 00:00:00 2001 From: simonjiao Date: Wed, 26 Jul 2023 19:31:09 +0800 Subject: [PATCH 11/30] make whole project buildable --- chain/src/dag_chain.rs | 9 ++++----- consensus/dag-consensus/Cargo.toml | 2 +- consensus/dag-consensus/ghostdag/Cargo.toml | 2 +- consensus/dag-consensus/ghostdag/src/mergeset.rs | 4 +++- consensus/dag-consensus/ghostdag/src/protocol.rs | 4 +++- consensus/dag-consensus/reachability/Cargo.toml | 2 +- consensus/dag-consensus/reachability/src/extensions.rs | 2 +- 
consensus/dag-consensus/reachability/src/inquirer.rs | 2 +- consensus/dag-consensus/reachability/src/lib.rs | 2 +- .../reachability/src/reachability_service.rs | 2 +- consensus/dag-consensus/reachability/src/reindex.rs | 2 +- .../reachability/src/relations_service.rs | 2 +- consensus/dag-consensus/reachability/src/tests.rs | 2 +- consensus/dag-consensus/reachability/src/tree.rs | 2 +- consensus/dag-consensus/src/blockdag.rs | 10 +++++----- 15 files changed, 26 insertions(+), 23 deletions(-) diff --git a/chain/src/dag_chain.rs b/chain/src/dag_chain.rs index 042c059b55..729fa1bb55 100644 --- a/chain/src/dag_chain.rs +++ b/chain/src/dag_chain.rs @@ -2,14 +2,15 @@ use std::sync::Arc; use anyhow::bail; use dag_consensus::blockdag::BlockDAG; -use dag_database::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; use starcoin_accumulator::Accumulator; use starcoin_accumulator::{node::AccumulatorStoreType, MerkleAccumulator}; use starcoin_config::NodeConfig; +use starcoin_consensus::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; use starcoin_crypto::HashValue; use starcoin_executor::VMMetrics; use starcoin_network_rpc_api::dag_protocol::{ - GetDagAccumulatorLeaves, TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail, GetTargetDagAccumulatorLeafDetail, RelationshipPair, + GetDagAccumulatorLeaves, GetTargetDagAccumulatorLeafDetail, RelationshipPair, + TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail, }; use starcoin_storage::storage::CodecKVStore; use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, Store}; @@ -143,9 +144,7 @@ impl DagBlockChain { .child_hashes .into_iter() .fold([].to_vec(), |mut pairs, child| { - let parents = dag - .get_parents(child) - .expect("a child must have parents"); + let parents = dag.get_parents(child).expect("a child must have parents"); parents.into_iter().for_each(|parent| { pairs.push(RelationshipPair { parent, child }); }); diff --git a/consensus/dag-consensus/Cargo.toml b/consensus/dag-consensus/Cargo.toml index ac54a0d90d..c5c7a405ab 100644 --- a/consensus/dag-consensus/Cargo.toml +++ b/consensus/dag-consensus/Cargo.toml @@ -8,7 +8,7 @@ edition.workspace = true [dependencies] ghostdag.workspace = true reachability.workspace = true -dag-database.workspace = true +starcoin-consensus.workspace = true parking_lot.workspace = true starcoin-crypto.workspace = true starcoin-types.workspace = true diff --git a/consensus/dag-consensus/ghostdag/Cargo.toml b/consensus/dag-consensus/ghostdag/Cargo.toml index a38b4d4809..534ab72a4c 100644 --- a/consensus/dag-consensus/ghostdag/Cargo.toml +++ b/consensus/dag-consensus/ghostdag/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true [dependencies] thiserror.workspace = true -dag-database.workspace = true +starcoin-consensus.workspace = true starcoin-crypto.workspace = true starcoin-types.workspace = true serde.workspace = true diff --git a/consensus/dag-consensus/ghostdag/src/mergeset.rs b/consensus/dag-consensus/ghostdag/src/mergeset.rs index f47a221777..f2799b12ff 100644 --- a/consensus/dag-consensus/ghostdag/src/mergeset.rs +++ b/consensus/dag-consensus/ghostdag/src/mergeset.rs @@ -1,6 +1,8 @@ use super::protocol::GhostdagManager; -use dag_database::consensus::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; use reachability::reachability_service::ReachabilityService; +use starcoin_consensus::consensusdb::consensus::{ + GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader, +}; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::BlockHashSet; use 
std::collections::VecDeque; diff --git a/consensus/dag-consensus/ghostdag/src/protocol.rs b/consensus/dag-consensus/ghostdag/src/protocol.rs index 4f6a0fb3fd..ce12c33dfe 100644 --- a/consensus/dag-consensus/ghostdag/src/protocol.rs +++ b/consensus/dag-consensus/ghostdag/src/protocol.rs @@ -1,6 +1,8 @@ use crate::util::Refs; -use dag_database::consensus::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; use reachability::reachability_service::ReachabilityService; +use starcoin_consensus::consensusdb::consensus::{ + GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader, +}; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ blockhash::{ diff --git a/consensus/dag-consensus/reachability/Cargo.toml b/consensus/dag-consensus/reachability/Cargo.toml index f65bd6e222..aa9e610917 100644 --- a/consensus/dag-consensus/reachability/Cargo.toml +++ b/consensus/dag-consensus/reachability/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true [dependencies] thiserror.workspace = true -dag-database.workspace = true +starcoin-consensus.workspace = true starcoin-crypto.workspace = true starcoin-types.workspace = true serde.workspace = true diff --git a/consensus/dag-consensus/reachability/src/extensions.rs b/consensus/dag-consensus/reachability/src/extensions.rs index 829d1a855e..c8a9d3a8c8 100644 --- a/consensus/dag-consensus/reachability/src/extensions.rs +++ b/consensus/dag-consensus/reachability/src/extensions.rs @@ -1,4 +1,4 @@ -use dag_database::{consensus::ReachabilityStoreReader, prelude::StoreResult}; +use starcoin_consensus::consensusdb::{consensus::ReachabilityStoreReader, prelude::StoreResult}; use starcoin_crypto::hash::HashValue as Hash; use starcoin_types::interval::Interval; diff --git a/consensus/dag-consensus/reachability/src/inquirer.rs b/consensus/dag-consensus/reachability/src/inquirer.rs index 57f0960c2f..0441c66485 100644 --- a/consensus/dag-consensus/reachability/src/inquirer.rs +++ b/consensus/dag-consensus/reachability/src/inquirer.rs @@ -1,5 +1,5 @@ use super::{tree::*, *}; -use dag_database::consensus::{ReachabilityStore, ReachabilityStoreReader}; +use starcoin_consensus::consensusdb::consensus::{ReachabilityStore, ReachabilityStoreReader}; use starcoin_crypto::HashValue as Hash; use starcoin_types::{blockhash, interval::Interval, perf}; diff --git a/consensus/dag-consensus/reachability/src/lib.rs b/consensus/dag-consensus/reachability/src/lib.rs index 69510709e6..b4c5938190 100644 --- a/consensus/dag-consensus/reachability/src/lib.rs +++ b/consensus/dag-consensus/reachability/src/lib.rs @@ -8,7 +8,7 @@ pub mod relations_service; mod tests; mod tree; -use dag_database::prelude::StoreError; +use starcoin_consensus::consensusdb::prelude::StoreError; use thiserror::Error; #[derive(Error, Debug)] diff --git a/consensus/dag-consensus/reachability/src/reachability_service.rs b/consensus/dag-consensus/reachability/src/reachability_service.rs index bf5c4e1a8b..5a4cb37193 100644 --- a/consensus/dag-consensus/reachability/src/reachability_service.rs +++ b/consensus/dag-consensus/reachability/src/reachability_service.rs @@ -1,6 +1,6 @@ use crate::{inquirer, Result}; -use dag_database::consensus::ReachabilityStoreReader; use parking_lot::RwLock; +use starcoin_consensus::consensusdb::consensus::ReachabilityStoreReader; use starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_types::blockhash; use std::{ops::Deref, sync::Arc}; diff --git a/consensus/dag-consensus/reachability/src/reindex.rs b/consensus/dag-consensus/reachability/src/reindex.rs index 
342517e86a..8ad4010527 100644 --- a/consensus/dag-consensus/reachability/src/reindex.rs +++ b/consensus/dag-consensus/reachability/src/reindex.rs @@ -1,7 +1,7 @@ use crate::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, }; -use dag_database::consensus::ReachabilityStore; +use starcoin_consensus::consensusdb::consensus::ReachabilityStore; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ blockhash::{BlockHashExtensions, BlockHashMap}, diff --git a/consensus/dag-consensus/reachability/src/relations_service.rs b/consensus/dag-consensus/reachability/src/relations_service.rs index 9020f307c2..44b26f3cfa 100644 --- a/consensus/dag-consensus/reachability/src/relations_service.rs +++ b/consensus/dag-consensus/reachability/src/relations_service.rs @@ -1,5 +1,5 @@ -use dag_database::{consensus::RelationsStoreReader, prelude::StoreError}; use parking_lot::RwLock; +use starcoin_consensus::consensusdb::{consensus::RelationsStoreReader, prelude::StoreError}; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::BlockHashes; use std::sync::Arc; diff --git a/consensus/dag-consensus/reachability/src/tests.rs b/consensus/dag-consensus/reachability/src/tests.rs index 80812bee99..7b4c17017b 100644 --- a/consensus/dag-consensus/reachability/src/tests.rs +++ b/consensus/dag-consensus/reachability/src/tests.rs @@ -2,7 +2,7 @@ //! Test utils for reachability //! use super::{inquirer::*, tree::*}; -use dag_database::{ +use starcoin_consensus::consensusdb::{ consensus::{ReachabilityStore, ReachabilityStoreReader}, prelude::StoreError, }; diff --git a/consensus/dag-consensus/reachability/src/tree.rs b/consensus/dag-consensus/reachability/src/tree.rs index 46c7cc28db..b29caa10b7 100644 --- a/consensus/dag-consensus/reachability/src/tree.rs +++ b/consensus/dag-consensus/reachability/src/tree.rs @@ -5,7 +5,7 @@ use super::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext, *, }; -use dag_database::consensus::ReachabilityStore; +use starcoin_consensus::consensusdb::consensus::ReachabilityStore; use starcoin_crypto::HashValue as Hash; /// Adds `new_block` as a child of `parent` in the tree structure. 
If this block diff --git a/consensus/dag-consensus/src/blockdag.rs b/consensus/dag-consensus/src/blockdag.rs index 8d8b95920c..db6633d21e 100644 --- a/consensus/dag-consensus/src/blockdag.rs +++ b/consensus/dag-consensus/src/blockdag.rs @@ -1,12 +1,12 @@ use anyhow::bail; -use dag_database::consensus::{ - DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, - HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, -}; -use dag_database::prelude::FlexiDagStorage; use ghostdag::protocol::GhostdagManager; use parking_lot::RwLock; use reachability::{inquirer, reachability_service::MTReachabilityService}; +use starcoin_consensus::consensusdb::consensus::{ + DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, + HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, +}; +use starcoin_consensus::consensusdb::prelude::FlexiDagStorage; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ blockhash::{BlockHashes, KType, ORIGIN}, From 4f2449498e7d5c7c1851590fcabc1e5eb7ddf618 Mon Sep 17 00:00:00 2001 From: simonjiao Date: Wed, 26 Jul 2023 19:39:18 +0800 Subject: [PATCH 12/30] rename consensusdb module --- Cargo.lock | 41 +++++-------------- .../dag-consensus/ghostdag/src/mergeset.rs | 2 +- .../dag-consensus/ghostdag/src/protocol.rs | 2 +- .../reachability/src/extensions.rs | 2 +- .../reachability/src/inquirer.rs | 2 +- .../reachability/src/reachability_service.rs | 2 +- .../dag-consensus/reachability/src/reindex.rs | 2 +- .../reachability/src/relations_service.rs | 2 +- .../dag-consensus/reachability/src/tests.rs | 2 +- .../dag-consensus/reachability/src/tree.rs | 2 +- consensus/dag-consensus/src/blockdag.rs | 4 +- consensus/src/consensusdb/db.rs | 4 +- consensus/src/consensusdb/mod.rs | 2 +- 13 files changed, 24 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6ea205c6c..b945420517 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1849,35 +1849,14 @@ name = "dag-consensus" version = "1.13.5" dependencies = [ "anyhow", - "dag-database", "ghostdag", "parking_lot 0.12.1", "reachability", + "starcoin-consensus", "starcoin-crypto", "starcoin-types", ] -[[package]] -name = "dag-database" -version = "1.13.5" -dependencies = [ - "bincode", - "faster-hex", - "indexmap", - "itertools", - "num_cpus", - "parking_lot 0.12.1", - "rand 0.8.5", - "rocksdb", - "serde 1.0.152", - "starcoin-config", - "starcoin-crypto", - "starcoin-storage", - "starcoin-types", - "tempfile", - "thiserror", -] - [[package]] name = "darling" version = "0.9.0" @@ -2693,12 +2672,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" -[[package]] -name = "faster-hex" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e2ce894d53b295cf97b05685aa077950ff3e8541af83217fc720a6437169f8" - [[package]] name = "fastrand" version = "1.9.0" @@ -3146,12 +3119,12 @@ dependencies = [ name = "ghostdag" version = "1.13.5" dependencies = [ - "dag-database", "itertools", "parking_lot 0.12.1", "reachability", "rocksdb", "serde 1.0.152", + "starcoin-consensus", "starcoin-crypto", "starcoin-types", "thiserror", @@ -7811,11 +7784,11 @@ dependencies = [ name = "reachability" version = "1.13.5" dependencies = [ - "dag-database", "itertools", "parking_lot 0.12.1", "rocksdb", "serde 1.0.152", + "starcoin-consensus", "starcoin-crypto", "starcoin-storage", "starcoin-types", 
@@ -9243,7 +9216,6 @@ dependencies = [ "bcs-ext", "clap 3.2.23", "dag-consensus", - "dag-database", "proptest", "proptest-derive", "rand 0.8.5", @@ -9472,21 +9444,28 @@ name = "starcoin-consensus" version = "1.13.5" dependencies = [ "anyhow", + "bincode", "byteorder", "cryptonight-rs", "futures 0.3.26", "hex", + "itertools", "once_cell", + "parking_lot 0.12.1", "proptest", "proptest-derive", "rand 0.8.5", "rand_core 0.6.4", + "rocksdb", "rust-argon2", + "serde 1.0.152", "sha3", "starcoin-chain-api", + "starcoin-config", "starcoin-crypto", "starcoin-logger", "starcoin-state-api", + "starcoin-storage", "starcoin-time-service", "starcoin-types", "starcoin-vm-types", diff --git a/consensus/dag-consensus/ghostdag/src/mergeset.rs b/consensus/dag-consensus/ghostdag/src/mergeset.rs index f2799b12ff..2607049d70 100644 --- a/consensus/dag-consensus/ghostdag/src/mergeset.rs +++ b/consensus/dag-consensus/ghostdag/src/mergeset.rs @@ -1,6 +1,6 @@ use super::protocol::GhostdagManager; use reachability::reachability_service::ReachabilityService; -use starcoin_consensus::consensusdb::consensus::{ +use starcoin_consensus::consensusdb::schema::{ GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader, }; use starcoin_crypto::HashValue as Hash; diff --git a/consensus/dag-consensus/ghostdag/src/protocol.rs b/consensus/dag-consensus/ghostdag/src/protocol.rs index ce12c33dfe..5c481c0dfa 100644 --- a/consensus/dag-consensus/ghostdag/src/protocol.rs +++ b/consensus/dag-consensus/ghostdag/src/protocol.rs @@ -1,6 +1,6 @@ use crate::util::Refs; use reachability::reachability_service::ReachabilityService; -use starcoin_consensus::consensusdb::consensus::{ +use starcoin_consensus::consensusdb::schema::{ GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader, }; use starcoin_crypto::HashValue as Hash; diff --git a/consensus/dag-consensus/reachability/src/extensions.rs b/consensus/dag-consensus/reachability/src/extensions.rs index c8a9d3a8c8..7416b7fa68 100644 --- a/consensus/dag-consensus/reachability/src/extensions.rs +++ b/consensus/dag-consensus/reachability/src/extensions.rs @@ -1,4 +1,4 @@ -use starcoin_consensus::consensusdb::{consensus::ReachabilityStoreReader, prelude::StoreResult}; +use starcoin_consensus::consensusdb::{prelude::StoreResult, schema::ReachabilityStoreReader}; use starcoin_crypto::hash::HashValue as Hash; use starcoin_types::interval::Interval; diff --git a/consensus/dag-consensus/reachability/src/inquirer.rs b/consensus/dag-consensus/reachability/src/inquirer.rs index 0441c66485..a2c55d7884 100644 --- a/consensus/dag-consensus/reachability/src/inquirer.rs +++ b/consensus/dag-consensus/reachability/src/inquirer.rs @@ -1,5 +1,5 @@ use super::{tree::*, *}; -use starcoin_consensus::consensusdb::consensus::{ReachabilityStore, ReachabilityStoreReader}; +use starcoin_consensus::consensusdb::schema::{ReachabilityStore, ReachabilityStoreReader}; use starcoin_crypto::HashValue as Hash; use starcoin_types::{blockhash, interval::Interval, perf}; diff --git a/consensus/dag-consensus/reachability/src/reachability_service.rs b/consensus/dag-consensus/reachability/src/reachability_service.rs index 5a4cb37193..44beb8d6e0 100644 --- a/consensus/dag-consensus/reachability/src/reachability_service.rs +++ b/consensus/dag-consensus/reachability/src/reachability_service.rs @@ -1,6 +1,6 @@ use crate::{inquirer, Result}; use parking_lot::RwLock; -use starcoin_consensus::consensusdb::consensus::ReachabilityStoreReader; +use starcoin_consensus::consensusdb::schema::ReachabilityStoreReader; use 
starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_types::blockhash; use std::{ops::Deref, sync::Arc}; diff --git a/consensus/dag-consensus/reachability/src/reindex.rs b/consensus/dag-consensus/reachability/src/reindex.rs index 8ad4010527..56c5ee8028 100644 --- a/consensus/dag-consensus/reachability/src/reindex.rs +++ b/consensus/dag-consensus/reachability/src/reindex.rs @@ -1,7 +1,7 @@ use crate::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, }; -use starcoin_consensus::consensusdb::consensus::ReachabilityStore; +use starcoin_consensus::consensusdb::schema::ReachabilityStore; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ blockhash::{BlockHashExtensions, BlockHashMap}, diff --git a/consensus/dag-consensus/reachability/src/relations_service.rs b/consensus/dag-consensus/reachability/src/relations_service.rs index 44b26f3cfa..2730407e0d 100644 --- a/consensus/dag-consensus/reachability/src/relations_service.rs +++ b/consensus/dag-consensus/reachability/src/relations_service.rs @@ -1,5 +1,5 @@ use parking_lot::RwLock; -use starcoin_consensus::consensusdb::{consensus::RelationsStoreReader, prelude::StoreError}; +use starcoin_consensus::consensusdb::{prelude::StoreError, schema::RelationsStoreReader}; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::BlockHashes; use std::sync::Arc; diff --git a/consensus/dag-consensus/reachability/src/tests.rs b/consensus/dag-consensus/reachability/src/tests.rs index 7b4c17017b..d6e3398257 100644 --- a/consensus/dag-consensus/reachability/src/tests.rs +++ b/consensus/dag-consensus/reachability/src/tests.rs @@ -3,8 +3,8 @@ //! use super::{inquirer::*, tree::*}; use starcoin_consensus::consensusdb::{ - consensus::{ReachabilityStore, ReachabilityStoreReader}, prelude::StoreError, + schema::{ReachabilityStore, ReachabilityStoreReader}, }; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ diff --git a/consensus/dag-consensus/reachability/src/tree.rs b/consensus/dag-consensus/reachability/src/tree.rs index b29caa10b7..18a21ae8d8 100644 --- a/consensus/dag-consensus/reachability/src/tree.rs +++ b/consensus/dag-consensus/reachability/src/tree.rs @@ -5,7 +5,7 @@ use super::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext, *, }; -use starcoin_consensus::consensusdb::consensus::ReachabilityStore; +use starcoin_consensus::consensusdb::schema::ReachabilityStore; use starcoin_crypto::HashValue as Hash; /// Adds `new_block` as a child of `parent` in the tree structure. 
If this block diff --git a/consensus/dag-consensus/src/blockdag.rs b/consensus/dag-consensus/src/blockdag.rs index db6633d21e..ecb82dd28d 100644 --- a/consensus/dag-consensus/src/blockdag.rs +++ b/consensus/dag-consensus/src/blockdag.rs @@ -2,11 +2,11 @@ use anyhow::bail; use ghostdag::protocol::GhostdagManager; use parking_lot::RwLock; use reachability::{inquirer, reachability_service::MTReachabilityService}; -use starcoin_consensus::consensusdb::consensus::{ +use starcoin_consensus::consensusdb::prelude::FlexiDagStorage; +use starcoin_consensus::consensusdb::schema::{ DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, }; -use starcoin_consensus::consensusdb::prelude::FlexiDagStorage; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ blockhash::{BlockHashes, KType, ORIGIN}, diff --git a/consensus/src/consensusdb/db.rs b/consensus/src/consensusdb/db.rs index 0728f17e32..be6fc35cac 100644 --- a/consensus/src/consensusdb/db.rs +++ b/consensus/src/consensusdb/db.rs @@ -1,9 +1,9 @@ -use super::consensus::{ +use super::errors::StoreError; +use super::schema::{ DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, HEADERS_STORE_CF, PARENTS_CF, REACHABILITY_DATA_CF, }; -use super::errors::StoreError; use starcoin_config::RocksdbConfig; pub(crate) use starcoin_storage::db_storage::DBStorage; use std::{path::Path, sync::Arc}; diff --git a/consensus/src/consensusdb/mod.rs b/consensus/src/consensusdb/mod.rs index f3d77a61e4..f15f665a74 100644 --- a/consensus/src/consensusdb/mod.rs +++ b/consensus/src/consensusdb/mod.rs @@ -22,7 +22,7 @@ pub mod prelude { pub use errors::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; } -pub mod consensus { +pub mod schema { pub use super::{ consensus_ghostdag::*, consensus_header::*, consensus_reachability::*, consensus_relations::*, From e57ff8e916e54c332969bbc3fff309736fd18301 Mon Sep 17 00:00:00 2001 From: simonjiao Date: Wed, 26 Jul 2023 20:03:47 +0800 Subject: [PATCH 13/30] downgrade ghostdag and reachability crates --- Cargo.toml | 2 -- consensus/dag-consensus/Cargo.toml | 5 ++--- consensus/dag-consensus/ghostdag/Cargo.toml | 17 ----------------- consensus/dag-consensus/reachability/Cargo.toml | 16 ---------------- consensus/dag-consensus/src/blockdag.rs | 4 ++-- .../{ghostdag/src => src/ghostdag}/mergeset.rs | 2 +- .../src/lib.rs => src/ghostdag/mod.rs} | 0 .../{ghostdag/src => src/ghostdag}/protocol.rs | 4 ++-- .../{ghostdag/src => src/ghostdag}/util.rs | 0 consensus/dag-consensus/src/lib.rs | 2 ++ .../src => src/reachability}/extensions.rs | 0 .../src => src/reachability}/inquirer.rs | 0 .../src/lib.rs => src/reachability/mod.rs} | 0 .../reachability}/reachability_service.rs | 2 +- .../src => src/reachability}/reindex.rs | 2 +- .../reachability}/relations_service.rs | 0 .../src => src/reachability}/tests.rs | 0 .../src => src/reachability}/tree.rs | 0 18 files changed, 11 insertions(+), 45 deletions(-) delete mode 100644 consensus/dag-consensus/ghostdag/Cargo.toml delete mode 100644 consensus/dag-consensus/reachability/Cargo.toml rename consensus/dag-consensus/{ghostdag/src => src/ghostdag}/mergeset.rs (97%) rename consensus/dag-consensus/{ghostdag/src/lib.rs => src/ghostdag/mod.rs} (100%) rename consensus/dag-consensus/{ghostdag/src => src/ghostdag}/protocol.rs (99%) rename 
consensus/dag-consensus/{ghostdag/src => src/ghostdag}/util.rs (100%) rename consensus/dag-consensus/{reachability/src => src/reachability}/extensions.rs (100%) rename consensus/dag-consensus/{reachability/src => src/reachability}/inquirer.rs (100%) rename consensus/dag-consensus/{reachability/src/lib.rs => src/reachability/mod.rs} (100%) rename consensus/dag-consensus/{reachability/src => src/reachability}/reachability_service.rs (99%) rename consensus/dag-consensus/{reachability/src => src/reachability}/reindex.rs (99%) rename consensus/dag-consensus/{reachability/src => src/reachability}/relations_service.rs (100%) rename consensus/dag-consensus/{reachability/src => src/reachability}/tests.rs (100%) rename consensus/dag-consensus/{reachability/src => src/reachability}/tree.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index e654e651c0..426d46e45f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -517,8 +517,6 @@ wasm-timer = "0.2" which = "4.1.0" zeroize = "1.3.0" dag-consensus = { path = "consensus/dag-consensus" } -ghostdag = { path = "consensus/dag-consensus/ghostdag" } -reachability = { path = "consensus/dag-consensus/reachability" } [profile.release.package] starcoin-service-registry.debug = 1 diff --git a/consensus/dag-consensus/Cargo.toml b/consensus/dag-consensus/Cargo.toml index c5c7a405ab..3aa0a9215f 100644 --- a/consensus/dag-consensus/Cargo.toml +++ b/consensus/dag-consensus/Cargo.toml @@ -6,10 +6,9 @@ edition.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ghostdag.workspace = true -reachability.workspace = true starcoin-consensus.workspace = true parking_lot.workspace = true starcoin-crypto.workspace = true starcoin-types.workspace = true -anyhow.workspace = true \ No newline at end of file +anyhow.workspace = true +thiserror.workspace = true \ No newline at end of file diff --git a/consensus/dag-consensus/ghostdag/Cargo.toml b/consensus/dag-consensus/ghostdag/Cargo.toml deleted file mode 100644 index 534ab72a4c..0000000000 --- a/consensus/dag-consensus/ghostdag/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "ghostdag" -version = "1.13.5" -edition.workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -thiserror.workspace = true -starcoin-consensus.workspace = true -starcoin-crypto.workspace = true -starcoin-types.workspace = true -serde.workspace = true -itertools.workspace = true -parking_lot.workspace = true -rocksdb.workspace = true -reachability.workspace = true \ No newline at end of file diff --git a/consensus/dag-consensus/reachability/Cargo.toml b/consensus/dag-consensus/reachability/Cargo.toml deleted file mode 100644 index aa9e610917..0000000000 --- a/consensus/dag-consensus/reachability/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "reachability" -version = "1.13.5" -edition.workspace = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -thiserror.workspace = true -starcoin-consensus.workspace = true -starcoin-crypto.workspace = true -starcoin-types.workspace = true -serde.workspace = true -itertools.workspace = true -parking_lot.workspace = true -rocksdb.workspace = true -starcoin-storage.workspace = true diff --git a/consensus/dag-consensus/src/blockdag.rs b/consensus/dag-consensus/src/blockdag.rs index ecb82dd28d..fcf574f7bf 100644 --- a/consensus/dag-consensus/src/blockdag.rs +++ 
b/consensus/dag-consensus/src/blockdag.rs @@ -1,7 +1,7 @@ +use super::ghostdag::protocol::GhostdagManager; +use super::reachability::{inquirer, reachability_service::MTReachabilityService}; use anyhow::bail; -use ghostdag::protocol::GhostdagManager; use parking_lot::RwLock; -use reachability::{inquirer, reachability_service::MTReachabilityService}; use starcoin_consensus::consensusdb::prelude::FlexiDagStorage; use starcoin_consensus::consensusdb::schema::{ DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, diff --git a/consensus/dag-consensus/ghostdag/src/mergeset.rs b/consensus/dag-consensus/src/ghostdag/mergeset.rs similarity index 97% rename from consensus/dag-consensus/ghostdag/src/mergeset.rs rename to consensus/dag-consensus/src/ghostdag/mergeset.rs index 2607049d70..70247c9b9e 100644 --- a/consensus/dag-consensus/ghostdag/src/mergeset.rs +++ b/consensus/dag-consensus/src/ghostdag/mergeset.rs @@ -1,5 +1,5 @@ use super::protocol::GhostdagManager; -use reachability::reachability_service::ReachabilityService; +use crate::reachability::reachability_service::ReachabilityService; use starcoin_consensus::consensusdb::schema::{ GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader, }; diff --git a/consensus/dag-consensus/ghostdag/src/lib.rs b/consensus/dag-consensus/src/ghostdag/mod.rs similarity index 100% rename from consensus/dag-consensus/ghostdag/src/lib.rs rename to consensus/dag-consensus/src/ghostdag/mod.rs diff --git a/consensus/dag-consensus/ghostdag/src/protocol.rs b/consensus/dag-consensus/src/ghostdag/protocol.rs similarity index 99% rename from consensus/dag-consensus/ghostdag/src/protocol.rs rename to consensus/dag-consensus/src/ghostdag/protocol.rs index 5c481c0dfa..c19f273169 100644 --- a/consensus/dag-consensus/ghostdag/src/protocol.rs +++ b/consensus/dag-consensus/src/ghostdag/protocol.rs @@ -1,5 +1,5 @@ -use crate::util::Refs; -use reachability::reachability_service::ReachabilityService; +use super::util::Refs; +use crate::reachability::reachability_service::ReachabilityService; use starcoin_consensus::consensusdb::schema::{ GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader, }; diff --git a/consensus/dag-consensus/ghostdag/src/util.rs b/consensus/dag-consensus/src/ghostdag/util.rs similarity index 100% rename from consensus/dag-consensus/ghostdag/src/util.rs rename to consensus/dag-consensus/src/ghostdag/util.rs diff --git a/consensus/dag-consensus/src/lib.rs b/consensus/dag-consensus/src/lib.rs index 89210c01fe..a342a41e3b 100644 --- a/consensus/dag-consensus/src/lib.rs +++ b/consensus/dag-consensus/src/lib.rs @@ -1 +1,3 @@ pub mod blockdag; +mod ghostdag; +mod reachability; diff --git a/consensus/dag-consensus/reachability/src/extensions.rs b/consensus/dag-consensus/src/reachability/extensions.rs similarity index 100% rename from consensus/dag-consensus/reachability/src/extensions.rs rename to consensus/dag-consensus/src/reachability/extensions.rs diff --git a/consensus/dag-consensus/reachability/src/inquirer.rs b/consensus/dag-consensus/src/reachability/inquirer.rs similarity index 100% rename from consensus/dag-consensus/reachability/src/inquirer.rs rename to consensus/dag-consensus/src/reachability/inquirer.rs diff --git a/consensus/dag-consensus/reachability/src/lib.rs b/consensus/dag-consensus/src/reachability/mod.rs similarity index 100% rename from consensus/dag-consensus/reachability/src/lib.rs rename to consensus/dag-consensus/src/reachability/mod.rs diff --git 
a/consensus/dag-consensus/reachability/src/reachability_service.rs b/consensus/dag-consensus/src/reachability/reachability_service.rs similarity index 99% rename from consensus/dag-consensus/reachability/src/reachability_service.rs rename to consensus/dag-consensus/src/reachability/reachability_service.rs index 44beb8d6e0..368a75ec64 100644 --- a/consensus/dag-consensus/reachability/src/reachability_service.rs +++ b/consensus/dag-consensus/src/reachability/reachability_service.rs @@ -1,4 +1,4 @@ -use crate::{inquirer, Result}; +use super::{inquirer, Result}; use parking_lot::RwLock; use starcoin_consensus::consensusdb::schema::ReachabilityStoreReader; use starcoin_crypto::{HashValue as Hash, HashValue}; diff --git a/consensus/dag-consensus/reachability/src/reindex.rs b/consensus/dag-consensus/src/reachability/reindex.rs similarity index 99% rename from consensus/dag-consensus/reachability/src/reindex.rs rename to consensus/dag-consensus/src/reachability/reindex.rs index 56c5ee8028..45a3485852 100644 --- a/consensus/dag-consensus/reachability/src/reindex.rs +++ b/consensus/dag-consensus/src/reachability/reindex.rs @@ -1,4 +1,4 @@ -use crate::{ +use super::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, }; use starcoin_consensus::consensusdb::schema::ReachabilityStore; diff --git a/consensus/dag-consensus/reachability/src/relations_service.rs b/consensus/dag-consensus/src/reachability/relations_service.rs similarity index 100% rename from consensus/dag-consensus/reachability/src/relations_service.rs rename to consensus/dag-consensus/src/reachability/relations_service.rs diff --git a/consensus/dag-consensus/reachability/src/tests.rs b/consensus/dag-consensus/src/reachability/tests.rs similarity index 100% rename from consensus/dag-consensus/reachability/src/tests.rs rename to consensus/dag-consensus/src/reachability/tests.rs diff --git a/consensus/dag-consensus/reachability/src/tree.rs b/consensus/dag-consensus/src/reachability/tree.rs similarity index 100% rename from consensus/dag-consensus/reachability/src/tree.rs rename to consensus/dag-consensus/src/reachability/tree.rs From affce35e95600371e3c0561415450f8985f6bcd9 Mon Sep 17 00:00:00 2001 From: simonjiao Date: Wed, 26 Jul 2023 20:24:47 +0800 Subject: [PATCH 14/30] downgrade dag-consensus crate --- Cargo.lock | 44 ------------------- Cargo.toml | 1 - chain/Cargo.toml | 2 - chain/src/dag_chain.rs | 3 +- consensus/dag-consensus/Cargo.toml | 14 ------ .../src => src/dag}/blockdag.rs | 12 ++--- .../src => src/dag}/ghostdag/mergeset.rs | 6 +-- .../src => src/dag}/ghostdag/mod.rs | 0 .../src => src/dag}/ghostdag/protocol.rs | 6 +-- .../src => src/dag}/ghostdag/util.rs | 0 .../src/lib.rs => src/dag/mod.rs} | 0 .../dag}/reachability/extensions.rs | 2 +- .../src => src/dag}/reachability/inquirer.rs | 2 +- .../src => src/dag}/reachability/mod.rs | 2 +- .../dag}/reachability/reachability_service.rs | 2 +- .../src => src/dag}/reachability/reindex.rs | 2 +- .../dag}/reachability/relations_service.rs | 2 +- .../src => src/dag}/reachability/tests.rs | 2 +- .../src => src/dag}/reachability/tree.rs | 2 +- consensus/src/lib.rs | 5 ++- 20 files changed, 24 insertions(+), 85 deletions(-) delete mode 100644 consensus/dag-consensus/Cargo.toml rename consensus/{dag-consensus/src => src/dag}/blockdag.rs (96%) rename consensus/{dag-consensus/src => src/dag}/ghostdag/mergeset.rs (92%) rename consensus/{dag-consensus/src => src/dag}/ghostdag/mod.rs (100%) rename consensus/{dag-consensus/src => 
src/dag}/ghostdag/protocol.rs (98%) rename consensus/{dag-consensus/src => src/dag}/ghostdag/util.rs (100%) rename consensus/{dag-consensus/src/lib.rs => src/dag/mod.rs} (100%) rename consensus/{dag-consensus/src => src/dag}/reachability/extensions.rs (95%) rename consensus/{dag-consensus/src => src/dag}/reachability/inquirer.rs (99%) rename consensus/{dag-consensus/src => src/dag}/reachability/mod.rs (95%) rename consensus/{dag-consensus/src => src/dag}/reachability/reachability_service.rs (99%) rename consensus/{dag-consensus/src => src/dag}/reachability/reindex.rs (99%) rename consensus/{dag-consensus/src => src/dag}/reachability/relations_service.rs (91%) rename consensus/{dag-consensus/src => src/dag}/reachability/tests.rs (99%) rename consensus/{dag-consensus/src => src/dag}/reachability/tree.rs (98%) diff --git a/Cargo.lock b/Cargo.lock index b945420517..66f6b94850 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1844,19 +1844,6 @@ dependencies = [ "syn 1.0.107", ] -[[package]] -name = "dag-consensus" -version = "1.13.5" -dependencies = [ - "anyhow", - "ghostdag", - "parking_lot 0.12.1", - "reachability", - "starcoin-consensus", - "starcoin-crypto", - "starcoin-types", -] - [[package]] name = "darling" version = "0.9.0" @@ -3115,21 +3102,6 @@ dependencies = [ "textwrap 0.11.0", ] -[[package]] -name = "ghostdag" -version = "1.13.5" -dependencies = [ - "itertools", - "parking_lot 0.12.1", - "reachability", - "rocksdb", - "serde 1.0.152", - "starcoin-consensus", - "starcoin-crypto", - "starcoin-types", - "thiserror", -] - [[package]] name = "gimli" version = "0.27.2" @@ -7780,21 +7752,6 @@ dependencies = [ "rand_core 0.3.1", ] -[[package]] -name = "reachability" -version = "1.13.5" -dependencies = [ - "itertools", - "parking_lot 0.12.1", - "rocksdb", - "serde 1.0.152", - "starcoin-consensus", - "starcoin-crypto", - "starcoin-storage", - "starcoin-types", - "thiserror", -] - [[package]] name = "read-write-set" version = "0.1.0" @@ -9215,7 +9172,6 @@ dependencies = [ "anyhow", "bcs-ext", "clap 3.2.23", - "dag-consensus", "proptest", "proptest-derive", "rand 0.8.5", diff --git a/Cargo.toml b/Cargo.toml index 426d46e45f..48cc7c5481 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -516,7 +516,6 @@ walkdir = "2.3.1" wasm-timer = "0.2" which = "4.1.0" zeroize = "1.3.0" -dag-consensus = { path = "consensus/dag-consensus" } [profile.release.package] starcoin-service-registry.debug = 1 diff --git a/chain/Cargo.toml b/chain/Cargo.toml index c9b563639b..fc4f409341 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -23,7 +23,6 @@ starcoin-types = { package = "starcoin-types", workspace = true } starcoin-vm-types = { workspace = true } starcoin-storage = { workspace = true } thiserror = { workspace = true } -dag-consensus = { workspace = true } starcoin-network-rpc-api = { workspace = true } [dev-dependencies] @@ -41,7 +40,6 @@ stdlib = { workspace = true } stest = { workspace = true } test-helper = { workspace = true } tokio = { features = ["full"], workspace = true } -dag-consensus = { workspace = true } starcoin-network-rpc-api = { workspace = true } [features] diff --git a/chain/src/dag_chain.rs b/chain/src/dag_chain.rs index 729fa1bb55..386597b635 100644 --- a/chain/src/dag_chain.rs +++ b/chain/src/dag_chain.rs @@ -1,11 +1,10 @@ use std::sync::Arc; use anyhow::bail; -use dag_consensus::blockdag::BlockDAG; use starcoin_accumulator::Accumulator; use starcoin_accumulator::{node::AccumulatorStoreType, MerkleAccumulator}; use starcoin_config::NodeConfig; -use 
starcoin_consensus::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; +use starcoin_consensus::{BlockDAG, FlexiDagStorage, FlexiDagStorageConfig}; use starcoin_crypto::HashValue; use starcoin_executor::VMMetrics; use starcoin_network_rpc_api::dag_protocol::{ diff --git a/consensus/dag-consensus/Cargo.toml b/consensus/dag-consensus/Cargo.toml deleted file mode 100644 index 3aa0a9215f..0000000000 --- a/consensus/dag-consensus/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "dag-consensus" -version = "1.13.5" -edition.workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -starcoin-consensus.workspace = true -parking_lot.workspace = true -starcoin-crypto.workspace = true -starcoin-types.workspace = true -anyhow.workspace = true -thiserror.workspace = true \ No newline at end of file diff --git a/consensus/dag-consensus/src/blockdag.rs b/consensus/src/dag/blockdag.rs similarity index 96% rename from consensus/dag-consensus/src/blockdag.rs rename to consensus/src/dag/blockdag.rs index fcf574f7bf..23880c0424 100644 --- a/consensus/dag-consensus/src/blockdag.rs +++ b/consensus/src/dag/blockdag.rs @@ -1,12 +1,14 @@ use super::ghostdag::protocol::GhostdagManager; use super::reachability::{inquirer, reachability_service::MTReachabilityService}; +use crate::consensusdb::{ + prelude::FlexiDagStorage, + schema::{ + DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, + HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, + }, +}; use anyhow::bail; use parking_lot::RwLock; -use starcoin_consensus::consensusdb::prelude::FlexiDagStorage; -use starcoin_consensus::consensusdb::schema::{ - DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, - HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, -}; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ blockhash::{BlockHashes, KType, ORIGIN}, diff --git a/consensus/dag-consensus/src/ghostdag/mergeset.rs b/consensus/src/dag/ghostdag/mergeset.rs similarity index 92% rename from consensus/dag-consensus/src/ghostdag/mergeset.rs rename to consensus/src/dag/ghostdag/mergeset.rs index 70247c9b9e..a674cd5a65 100644 --- a/consensus/dag-consensus/src/ghostdag/mergeset.rs +++ b/consensus/src/dag/ghostdag/mergeset.rs @@ -1,8 +1,6 @@ use super::protocol::GhostdagManager; -use crate::reachability::reachability_service::ReachabilityService; -use starcoin_consensus::consensusdb::schema::{ - GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader, -}; +use crate::consensusdb::schema::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use crate::dag::reachability::reachability_service::ReachabilityService; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::BlockHashSet; use std::collections::VecDeque; diff --git a/consensus/dag-consensus/src/ghostdag/mod.rs b/consensus/src/dag/ghostdag/mod.rs similarity index 100% rename from consensus/dag-consensus/src/ghostdag/mod.rs rename to consensus/src/dag/ghostdag/mod.rs diff --git a/consensus/dag-consensus/src/ghostdag/protocol.rs b/consensus/src/dag/ghostdag/protocol.rs similarity index 98% rename from consensus/dag-consensus/src/ghostdag/protocol.rs rename to consensus/src/dag/ghostdag/protocol.rs index c19f273169..6a4b27a39f 100644 --- a/consensus/dag-consensus/src/ghostdag/protocol.rs +++ b/consensus/src/dag/ghostdag/protocol.rs @@ -1,8 +1,6 @@ use super::util::Refs; 
-use crate::reachability::reachability_service::ReachabilityService; -use starcoin_consensus::consensusdb::schema::{ - GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader, -}; +use crate::consensusdb::schema::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use crate::dag::reachability::reachability_service::ReachabilityService; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ blockhash::{ diff --git a/consensus/dag-consensus/src/ghostdag/util.rs b/consensus/src/dag/ghostdag/util.rs similarity index 100% rename from consensus/dag-consensus/src/ghostdag/util.rs rename to consensus/src/dag/ghostdag/util.rs diff --git a/consensus/dag-consensus/src/lib.rs b/consensus/src/dag/mod.rs similarity index 100% rename from consensus/dag-consensus/src/lib.rs rename to consensus/src/dag/mod.rs diff --git a/consensus/dag-consensus/src/reachability/extensions.rs b/consensus/src/dag/reachability/extensions.rs similarity index 95% rename from consensus/dag-consensus/src/reachability/extensions.rs rename to consensus/src/dag/reachability/extensions.rs index 7416b7fa68..c3d7d87adc 100644 --- a/consensus/dag-consensus/src/reachability/extensions.rs +++ b/consensus/src/dag/reachability/extensions.rs @@ -1,4 +1,4 @@ -use starcoin_consensus::consensusdb::{prelude::StoreResult, schema::ReachabilityStoreReader}; +use crate::consensusdb::{prelude::StoreResult, schema::ReachabilityStoreReader}; use starcoin_crypto::hash::HashValue as Hash; use starcoin_types::interval::Interval; diff --git a/consensus/dag-consensus/src/reachability/inquirer.rs b/consensus/src/dag/reachability/inquirer.rs similarity index 99% rename from consensus/dag-consensus/src/reachability/inquirer.rs rename to consensus/src/dag/reachability/inquirer.rs index a2c55d7884..00c56f00e8 100644 --- a/consensus/dag-consensus/src/reachability/inquirer.rs +++ b/consensus/src/dag/reachability/inquirer.rs @@ -1,5 +1,5 @@ use super::{tree::*, *}; -use starcoin_consensus::consensusdb::schema::{ReachabilityStore, ReachabilityStoreReader}; +use crate::consensusdb::schema::{ReachabilityStore, ReachabilityStoreReader}; use starcoin_crypto::HashValue as Hash; use starcoin_types::{blockhash, interval::Interval, perf}; diff --git a/consensus/dag-consensus/src/reachability/mod.rs b/consensus/src/dag/reachability/mod.rs similarity index 95% rename from consensus/dag-consensus/src/reachability/mod.rs rename to consensus/src/dag/reachability/mod.rs index b4c5938190..ceb2905b03 100644 --- a/consensus/dag-consensus/src/reachability/mod.rs +++ b/consensus/src/dag/reachability/mod.rs @@ -8,7 +8,7 @@ pub mod relations_service; mod tests; mod tree; -use starcoin_consensus::consensusdb::prelude::StoreError; +use crate::consensusdb::prelude::StoreError; use thiserror::Error; #[derive(Error, Debug)] diff --git a/consensus/dag-consensus/src/reachability/reachability_service.rs b/consensus/src/dag/reachability/reachability_service.rs similarity index 99% rename from consensus/dag-consensus/src/reachability/reachability_service.rs rename to consensus/src/dag/reachability/reachability_service.rs index 368a75ec64..ed38fffc6d 100644 --- a/consensus/dag-consensus/src/reachability/reachability_service.rs +++ b/consensus/src/dag/reachability/reachability_service.rs @@ -1,6 +1,6 @@ use super::{inquirer, Result}; +use crate::consensusdb::schema::ReachabilityStoreReader; use parking_lot::RwLock; -use starcoin_consensus::consensusdb::schema::ReachabilityStoreReader; use starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_types::blockhash; use 
std::{ops::Deref, sync::Arc}; diff --git a/consensus/dag-consensus/src/reachability/reindex.rs b/consensus/src/dag/reachability/reindex.rs similarity index 99% rename from consensus/dag-consensus/src/reachability/reindex.rs rename to consensus/src/dag/reachability/reindex.rs index 45a3485852..a24e9bc91d 100644 --- a/consensus/dag-consensus/src/reachability/reindex.rs +++ b/consensus/src/dag/reachability/reindex.rs @@ -1,7 +1,7 @@ use super::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, }; -use starcoin_consensus::consensusdb::schema::ReachabilityStore; +use crate::consensusdb::schema::ReachabilityStore; use starcoin_crypto::HashValue as Hash; use starcoin_types::{ blockhash::{BlockHashExtensions, BlockHashMap}, diff --git a/consensus/dag-consensus/src/reachability/relations_service.rs b/consensus/src/dag/reachability/relations_service.rs similarity index 91% rename from consensus/dag-consensus/src/reachability/relations_service.rs rename to consensus/src/dag/reachability/relations_service.rs index 2730407e0d..848391d2ee 100644 --- a/consensus/dag-consensus/src/reachability/relations_service.rs +++ b/consensus/src/dag/reachability/relations_service.rs @@ -1,5 +1,5 @@ +use crate::consensusdb::{prelude::StoreError, schema::RelationsStoreReader}; use parking_lot::RwLock; -use starcoin_consensus::consensusdb::{prelude::StoreError, schema::RelationsStoreReader}; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::BlockHashes; use std::sync::Arc; diff --git a/consensus/dag-consensus/src/reachability/tests.rs b/consensus/src/dag/reachability/tests.rs similarity index 99% rename from consensus/dag-consensus/src/reachability/tests.rs rename to consensus/src/dag/reachability/tests.rs index d6e3398257..b41dd799e5 100644 --- a/consensus/dag-consensus/src/reachability/tests.rs +++ b/consensus/src/dag/reachability/tests.rs @@ -2,7 +2,7 @@ //! Test utils for reachability //! use super::{inquirer::*, tree::*}; -use starcoin_consensus::consensusdb::{ +use crate::consensusdb::{ prelude::StoreError, schema::{ReachabilityStore, ReachabilityStoreReader}, }; diff --git a/consensus/dag-consensus/src/reachability/tree.rs b/consensus/src/dag/reachability/tree.rs similarity index 98% rename from consensus/dag-consensus/src/reachability/tree.rs rename to consensus/src/dag/reachability/tree.rs index 18a21ae8d8..cc8357dc18 100644 --- a/consensus/dag-consensus/src/reachability/tree.rs +++ b/consensus/src/dag/reachability/tree.rs @@ -5,7 +5,7 @@ use super::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext, *, }; -use starcoin_consensus::consensusdb::schema::ReachabilityStore; +use crate::consensusdb::schema::ReachabilityStore; use starcoin_crypto::HashValue as Hash; /// Adds `new_block` as a child of `parent` in the tree structure. 
If this block diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 3c00eb74e9..731aa6e235 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -23,12 +23,15 @@ pub mod cn; mod consensus; #[cfg(test)] mod consensus_test; -pub mod consensusdb; +mod consensusdb; +mod dag; pub mod difficulty; pub mod dummy; pub mod keccak; pub use consensus::{Consensus, ConsensusVerifyError}; +pub use consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; +pub use dag::blockdag::BlockDAG; pub use starcoin_time_service::duration_since_epoch; pub fn target_to_difficulty(target: U256) -> U256 { From 73f240ada7ffdaeec533ef08f77747fac14544bd Mon Sep 17 00:00:00 2001 From: simonjiao Date: Wed, 26 Jul 2023 20:50:26 +0800 Subject: [PATCH 15/30] fix consensus tests --- Cargo.lock | 1 + consensus/Cargo.toml | 1 + consensus/src/consensusdb/consensus_relations.rs | 2 +- consensus/src/dag/blockdag.rs | 2 +- consensus/src/dag/reachability/inquirer.rs | 2 +- consensus/src/dag/reachability/reachability_service.rs | 4 ++-- consensus/src/dag/reachability/reindex.rs | 2 +- 7 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66f6b94850..1bbf21a1fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9426,6 +9426,7 @@ dependencies = [ "starcoin-types", "starcoin-vm-types", "stest", + "tempfile", "thiserror", ] diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index bcba969dd2..4bbd48dd3e 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -31,6 +31,7 @@ starcoin-config = { workspace = true } proptest = { workspace = true } proptest-derive = { workspace = true } stest = { workspace = true } +tempfile = { workspace = true } [features] default = [] diff --git a/consensus/src/consensusdb/consensus_relations.rs b/consensus/src/consensusdb/consensus_relations.rs index 9d55644009..ec1456ccf4 100644 --- a/consensus/src/consensusdb/consensus_relations.rs +++ b/consensus/src/consensusdb/consensus_relations.rs @@ -202,7 +202,7 @@ impl RelationsStore for MemoryRelationsStore { #[cfg(test)] mod tests { use super::*; - use crate::{ + use crate::consensusdb::{ db::{FlexiDagStorageConfig, RelationsStoreConfig}, prelude::FlexiDagStorage, }; diff --git a/consensus/src/dag/blockdag.rs b/consensus/src/dag/blockdag.rs index 23880c0424..5b8c359765 100644 --- a/consensus/src/dag/blockdag.rs +++ b/consensus/src/dag/blockdag.rs @@ -209,7 +209,7 @@ impl BlockDAG { #[cfg(test)] mod tests { use super::*; - use dag_database::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; + use crate::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; use starcoin_types::block::BlockHeader; use std::{env, fs}; #[test] diff --git a/consensus/src/dag/reachability/inquirer.rs b/consensus/src/dag/reachability/inquirer.rs index 00c56f00e8..e072070849 100644 --- a/consensus/src/dag/reachability/inquirer.rs +++ b/consensus/src/dag/reachability/inquirer.rs @@ -240,7 +240,7 @@ fn assert_hashes_ordered(store: &(impl ReachabilityStoreReader + ?Sized), ordere #[cfg(test)] mod tests { use super::{super::tests::*, *}; - use dag_database::consensus::MemoryReachabilityStore; + use crate::consensusdb::schema::MemoryReachabilityStore; use starcoin_types::blockhash::ORIGIN; #[test] diff --git a/consensus/src/dag/reachability/reachability_service.rs b/consensus/src/dag/reachability/reachability_service.rs index ed38fffc6d..f755d1ce92 100644 --- a/consensus/src/dag/reachability/reachability_service.rs +++ b/consensus/src/dag/reachability/reachability_service.rs @@ -228,8 +228,8 @@ impl Iterator for 
ForwardChainIterator { #[cfg(test)] mod tests { use super::*; - use crate::tests::TreeBuilder; - use dag_database::consensus::MemoryReachabilityStore; + use crate::consensusdb::schema::MemoryReachabilityStore; + use crate::dag::reachability::tests::TreeBuilder; use starcoin_types::interval::Interval; #[test] diff --git a/consensus/src/dag/reachability/reindex.rs b/consensus/src/dag/reachability/reindex.rs index a24e9bc91d..2cc7d9a946 100644 --- a/consensus/src/dag/reachability/reindex.rs +++ b/consensus/src/dag/reachability/reindex.rs @@ -571,7 +571,7 @@ fn split_children(children: &std::sync::Arc>, pivot: Hash) -> Result<( #[cfg(test)] mod tests { use super::{super::tests::*, *}; - use dag_database::consensus::{MemoryReachabilityStore, ReachabilityStoreReader}; + use crate::consensusdb::schema::{MemoryReachabilityStore, ReachabilityStoreReader}; use starcoin_types::{blockhash, interval::Interval}; #[test] From 221d453cae7969e9cc99fb8dcc81304a4fcf26c7 Mon Sep 17 00:00:00 2001 From: simonjiao Date: Wed, 26 Jul 2023 21:43:54 +0800 Subject: [PATCH 16/30] expose neccessary dag-consensus types --- consensus/src/consensusdb/consensus_ghostdag.rs | 10 ++++++---- consensus/src/consensusdb/consensus_reachability.rs | 7 ++----- consensus/src/dag/ghostdag/protocol.rs | 9 +++------ consensus/src/dag/mod.rs | 1 + consensus/src/dag/reachability/extensions.rs | 2 +- consensus/src/dag/reachability/inquirer.rs | 3 ++- consensus/src/dag/reachability/reachability_service.rs | 3 +-- consensus/src/dag/reachability/reindex.rs | 9 ++++----- consensus/src/dag/reachability/tests.rs | 7 ++----- {types/src => consensus/src/dag/types}/ghostdata.rs | 6 ++---- {types/src => consensus/src/dag/types}/interval.rs | 0 consensus/src/dag/types/mod.rs | 6 ++++++ {types/src => consensus/src/dag/types}/ordering.rs | 2 +- {types/src => consensus/src/dag/types}/perf.rs | 0 {types/src => consensus/src/dag/types}/reachability.rs | 3 ++- {types/src => consensus/src/dag/types}/trusted.rs | 2 +- types/src/lib.rs | 6 ------ 17 files changed, 34 insertions(+), 42 deletions(-) rename {types/src => consensus/src/dag/types}/ghostdata.rs (97%) rename {types/src => consensus/src/dag/types}/interval.rs (100%) create mode 100644 consensus/src/dag/types/mod.rs rename {types/src => consensus/src/dag/types}/ordering.rs (94%) rename {types/src => consensus/src/dag/types}/perf.rs (100%) rename {types/src => consensus/src/dag/types}/reachability.rs (88%) rename {types/src => consensus/src/dag/types}/trusted.rs (91%) diff --git a/consensus/src/consensusdb/consensus_ghostdag.rs b/consensus/src/consensusdb/consensus_ghostdag.rs index a67efe69f0..c66caaace0 100644 --- a/consensus/src/consensusdb/consensus_ghostdag.rs +++ b/consensus/src/consensusdb/consensus_ghostdag.rs @@ -4,16 +4,18 @@ use super::{ prelude::{CachedDbAccess, DirectDbWriter}, writer::BatchDbWriter, }; +use crate::dag::types::{ + ghostdata::{CompactGhostdagData, GhostdagData}, + ordering::SortableBlock, +}; use itertools::{ EitherOrBoth::{Both, Left, Right}, Itertools, }; use rocksdb::WriteBatch; use starcoin_crypto::HashValue as Hash; -use starcoin_types::{ - blockhash::{BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap}, - ghostdata::{CompactGhostdagData, GhostdagData}, - ordering::SortableBlock, +use starcoin_types::blockhash::{ + BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap, }; use std::{cell::RefCell, cmp, iter::once, sync::Arc}; diff --git a/consensus/src/consensusdb/consensus_reachability.rs b/consensus/src/consensusdb/consensus_reachability.rs 
index cdaf2d37dc..551606c11f 100644 --- a/consensus/src/consensusdb/consensus_reachability.rs +++ b/consensus/src/consensusdb/consensus_reachability.rs @@ -5,13 +5,10 @@ use super::{ use starcoin_crypto::HashValue as Hash; use starcoin_storage::storage::RawDBStorage; +use crate::dag::types::{interval::Interval, reachability::ReachabilityData}; use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard}; use rocksdb::WriteBatch; -use starcoin_types::{ - blockhash::{self, BlockHashMap, BlockHashes}, - interval::Interval, - reachability::ReachabilityData, -}; +use starcoin_types::blockhash::{self, BlockHashMap, BlockHashes}; use std::{collections::hash_map::Entry::Vacant, sync::Arc}; /// Reader API for `ReachabilityStore`. diff --git a/consensus/src/dag/ghostdag/protocol.rs b/consensus/src/dag/ghostdag/protocol.rs index 6a4b27a39f..812b245cd7 100644 --- a/consensus/src/dag/ghostdag/protocol.rs +++ b/consensus/src/dag/ghostdag/protocol.rs @@ -1,13 +1,10 @@ use super::util::Refs; use crate::consensusdb::schema::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; use crate::dag::reachability::reachability_service::ReachabilityService; +use crate::dag::types::{ghostdata::GhostdagData, ordering::*}; use starcoin_crypto::HashValue as Hash; -use starcoin_types::{ - blockhash::{ - self, BlockHashExtensions, BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType, - }, - ghostdata::GhostdagData, - ordering::*, +use starcoin_types::blockhash::{ + self, BlockHashExtensions, BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType, }; use std::sync::Arc; // For GhostdagStoreReader-related functions, use GhostDagDataWrapper instead. diff --git a/consensus/src/dag/mod.rs b/consensus/src/dag/mod.rs index a342a41e3b..948ac57ec4 100644 --- a/consensus/src/dag/mod.rs +++ b/consensus/src/dag/mod.rs @@ -1,3 +1,4 @@ pub mod blockdag; mod ghostdag; mod reachability; +pub mod types; diff --git a/consensus/src/dag/reachability/extensions.rs b/consensus/src/dag/reachability/extensions.rs index c3d7d87adc..497fe2c4a5 100644 --- a/consensus/src/dag/reachability/extensions.rs +++ b/consensus/src/dag/reachability/extensions.rs @@ -1,6 +1,6 @@ use crate::consensusdb::{prelude::StoreResult, schema::ReachabilityStoreReader}; +use crate::dag::types::interval::Interval; use starcoin_crypto::hash::HashValue as Hash; -use starcoin_types::interval::Interval; pub(super) trait ReachabilityStoreIntervalExtensions { fn interval_children_capacity(&self, block: Hash) -> StoreResult; diff --git a/consensus/src/dag/reachability/inquirer.rs b/consensus/src/dag/reachability/inquirer.rs index e072070849..2638989b08 100644 --- a/consensus/src/dag/reachability/inquirer.rs +++ b/consensus/src/dag/reachability/inquirer.rs @@ -1,7 +1,8 @@ use super::{tree::*, *}; use crate::consensusdb::schema::{ReachabilityStore, ReachabilityStoreReader}; +use crate::dag::types::{interval::Interval, perf}; use starcoin_crypto::HashValue as Hash; -use starcoin_types::{blockhash, interval::Interval, perf}; +use starcoin_types::blockhash; /// Init the reachability store to match the state required by the algorithmic layer. /// The function first checks the store for possibly being initialized already. 
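The hunks in this patch all serve one relocation: the DAG value types (ghostdata, interval, ordering, perf, reachability, trusted) move out of starcoin-types into the consensus crate's new dag::types module, while the blockhash helpers stay behind in starcoin-types. A condensed sketch of the resulting import shape inside the consensus crate, using only paths that already appear in these hunks:

    use crate::consensusdb::schema::{ReachabilityStore, ReachabilityStoreReader};
    use crate::dag::types::{ghostdata::GhostdagData, interval::Interval, reachability::ReachabilityData};
    use starcoin_types::blockhash::{BlockHashMap, BlockHashes};

Since lib.rs keeps dag as a private module, these paths are only reachable from inside the crate; external callers still go through the re-exported BlockDAG, FlexiDagStorage and FlexiDagStorageConfig.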
diff --git a/consensus/src/dag/reachability/reachability_service.rs b/consensus/src/dag/reachability/reachability_service.rs index f755d1ce92..20e8edeeed 100644 --- a/consensus/src/dag/reachability/reachability_service.rs +++ b/consensus/src/dag/reachability/reachability_service.rs @@ -229,8 +229,7 @@ impl Iterator for ForwardChainIterator { mod tests { use super::*; use crate::consensusdb::schema::MemoryReachabilityStore; - use crate::dag::reachability::tests::TreeBuilder; - use starcoin_types::interval::Interval; + use crate::dag::{reachability::tests::TreeBuilder, types::interval::Interval}; #[test] fn test_forward_iterator() { diff --git a/consensus/src/dag/reachability/reindex.rs b/consensus/src/dag/reachability/reindex.rs index 2cc7d9a946..c05c639d98 100644 --- a/consensus/src/dag/reachability/reindex.rs +++ b/consensus/src/dag/reachability/reindex.rs @@ -2,11 +2,9 @@ use super::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, }; use crate::consensusdb::schema::ReachabilityStore; +use crate::dag::types::interval::Interval; use starcoin_crypto::HashValue as Hash; -use starcoin_types::{ - blockhash::{BlockHashExtensions, BlockHashMap}, - interval::Interval, -}; +use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap}; use std::collections::VecDeque; /// A struct used during reindex operations. It represents a temporary context @@ -572,7 +570,8 @@ fn split_children(children: &std::sync::Arc>, pivot: Hash) -> Result<( mod tests { use super::{super::tests::*, *}; use crate::consensusdb::schema::{MemoryReachabilityStore, ReachabilityStoreReader}; - use starcoin_types::{blockhash, interval::Interval}; + use crate::dag::types::interval::Interval; + use starcoin_types::blockhash; #[test] fn test_count_subtrees() { diff --git a/consensus/src/dag/reachability/tests.rs b/consensus/src/dag/reachability/tests.rs index b41dd799e5..658cca74b5 100644 --- a/consensus/src/dag/reachability/tests.rs +++ b/consensus/src/dag/reachability/tests.rs @@ -6,12 +6,9 @@ use crate::consensusdb::{ prelude::StoreError, schema::{ReachabilityStore, ReachabilityStoreReader}, }; +use crate::dag::types::{interval::Interval, perf}; use starcoin_crypto::HashValue as Hash; -use starcoin_types::{ - blockhash::{BlockHashExtensions, BlockHashMap, BlockHashSet}, - interval::Interval, - perf, -}; +use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap, BlockHashSet}; use std::collections::VecDeque; use thiserror::Error; diff --git a/types/src/ghostdata.rs b/consensus/src/dag/types/ghostdata.rs similarity index 97% rename from types/src/ghostdata.rs rename to consensus/src/dag/types/ghostdata.rs index 02d1487dce..d11f630827 100644 --- a/types/src/ghostdata.rs +++ b/consensus/src/dag/types/ghostdata.rs @@ -1,9 +1,7 @@ -use crate::{ - blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType}, - trusted::ExternalGhostdagData, -}; +use super::trusted::ExternalGhostdagData; use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType}; use std::sync::Arc; #[derive(Clone, Serialize, Deserialize, Default, Debug)] diff --git a/types/src/interval.rs b/consensus/src/dag/types/interval.rs similarity index 100% rename from types/src/interval.rs rename to consensus/src/dag/types/interval.rs diff --git a/consensus/src/dag/types/mod.rs b/consensus/src/dag/types/mod.rs new file mode 100644 index 0000000000..d3acae1c23 --- /dev/null +++ 
b/consensus/src/dag/types/mod.rs @@ -0,0 +1,6 @@ +pub mod ghostdata; +pub mod interval; +pub mod ordering; +pub mod perf; +pub mod reachability; +pub mod trusted; diff --git a/types/src/ordering.rs b/consensus/src/dag/types/ordering.rs similarity index 94% rename from types/src/ordering.rs rename to consensus/src/dag/types/ordering.rs index 1fd006defa..a1ed8c2561 100644 --- a/types/src/ordering.rs +++ b/consensus/src/dag/types/ordering.rs @@ -1,6 +1,6 @@ -use crate::blockhash::BlueWorkType; use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::BlueWorkType; use std::cmp::Ordering; #[derive(Eq, Clone, Debug, Serialize, Deserialize)] diff --git a/types/src/perf.rs b/consensus/src/dag/types/perf.rs similarity index 100% rename from types/src/perf.rs rename to consensus/src/dag/types/perf.rs diff --git a/types/src/reachability.rs b/consensus/src/dag/types/reachability.rs similarity index 88% rename from types/src/reachability.rs rename to consensus/src/dag/types/reachability.rs index e79d485c17..62c84c3d6e 100644 --- a/types/src/reachability.rs +++ b/consensus/src/dag/types/reachability.rs @@ -1,6 +1,7 @@ -use crate::{blockhash::BlockHashes, interval::Interval}; +use super::interval::Interval; use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::BlockHashes; use std::sync::Arc; #[derive(Clone, Serialize, Deserialize)] diff --git a/types/src/trusted.rs b/consensus/src/dag/types/trusted.rs similarity index 91% rename from types/src/trusted.rs rename to consensus/src/dag/types/trusted.rs index 213bfd50ef..9a4cf37bbd 100644 --- a/types/src/trusted.rs +++ b/consensus/src/dag/types/trusted.rs @@ -1,6 +1,6 @@ -use crate::blockhash::{BlockHashMap, BlueWorkType, KType}; use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{BlockHashMap, BlueWorkType, KType}; /// Represents semi-trusted externally provided Ghostdag data (by a network peer) #[derive(Clone, Serialize, Deserialize)] diff --git a/types/src/lib.rs b/types/src/lib.rs index f4a5488b01..9ff354a624 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -107,10 +107,4 @@ pub mod proof { } pub mod blockhash; -pub mod ghostdata; pub mod header; -pub mod interval; -pub mod ordering; -pub mod perf; -pub mod reachability; -pub mod trusted; From 727fcd5561a014fcdbb33550b1d0bef9220efbf8 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 27 Jul 2023 10:34:29 +0800 Subject: [PATCH 17/30] add get dag accumulator details --- chain/api/src/service.rs | 40 +++++++++++++++++++++++++----- chain/service/src/chain_service.rs | 11 +++++--- network-rpc/api/src/lib.rs | 2 +- network-rpc/src/rpc.rs | 12 ++++----- 4 files changed, 48 insertions(+), 17 deletions(-) diff --git a/chain/api/src/service.rs b/chain/api/src/service.rs index 55b441a946..1529b22e28 100644 --- a/chain/api/src/service.rs +++ b/chain/api/src/service.rs @@ -140,7 +140,14 @@ pub trait ChainAsyncService: ) -> Result>; async fn get_block_infos(&self, hashes: Vec) -> Result>>; - async fn get_dag_accumulator_leaves(&self, req: dag_protocol::GetDagAccumulatorLeaves) -> Result>; + async fn get_dag_accumulator_leaves( + &self, + req: dag_protocol::GetDagAccumulatorLeaves, + ) -> Result>; + async fn get_dag_accumulator_leaves_detail( + &self, + req: dag_protocol::GetTargetDagAccumulatorLeafDetail, + ) -> Result>; } #[async_trait::async_trait] @@ -182,16 +189,37 @@ where } } - async fn get_dag_accumulator_leaves(&self, req: 
dag_protocol::GetDagAccumulatorLeaves) -> Result> { - if let ChainResponse::TargetDagAccumulatorLeaf(leaves) = - self.send(ChainRequest::GetDagAccumulatorLeaves { + async fn get_dag_accumulator_leaves( + &self, + req: dag_protocol::GetDagAccumulatorLeaves, + ) -> Result> { + if let ChainResponse::TargetDagAccumulatorLeaf(leaves) = self + .send(ChainRequest::GetDagAccumulatorLeaves { start_index: req.accumulator_leaf_index, batch_size: req.batch_size, - }).await?? + }) + .await?? { Ok(leaves) } else { - bail!("get_blocks response type error.") + bail!("get_dag_accumulator_leaves response type error.") + } + } + + async fn get_dag_accumulator_leaves_detail( + &self, + req: dag_protocol::GetTargetDagAccumulatorLeafDetail, + ) -> Result> { + if let ChainResponse::TargetDagAccumulatorLeafDetail(details) = self + .send(ChainRequest::GetTargetDagAccumulatorLeafDetail { + leaf_index: req.leaf_index, + batch_size: req.batch_size, + }) + .await?? + { + Ok(details) + } else { + bail!("get_dag_accumulator_leaves response type error.") } } diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index 6e0db0c4b9..8a551e0a14 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -11,7 +11,9 @@ use starcoin_chain_api::{ use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; -use starcoin_network_rpc_api::dag_protocol::{GetDagAccumulatorLeaves, GetTargetDagAccumulatorLeafDetail}; +use starcoin_network_rpc_api::dag_protocol::{ + GetDagAccumulatorLeaves, GetTargetDagAccumulatorLeafDetail, +}; use starcoin_service_registry::{ ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, }; @@ -256,11 +258,12 @@ impl ServiceHandler for ChainReaderService { leaf_index, batch_size, } => Ok(ChainResponse::TargetDagAccumulatorLeafDetail( - self.dag_chain - .get_target_dag_accumulator_leaf_detail(GetTargetDagAccumulatorLeafDetail { + self.dag_chain.get_target_dag_accumulator_leaf_detail( + GetTargetDagAccumulatorLeafDetail { leaf_index, batch_size, - })?, + }, + )?, )), } } diff --git a/network-rpc/api/src/lib.rs b/network-rpc/api/src/lib.rs index 7dfb73bfe5..1dafb55f58 100644 --- a/network-rpc/api/src/lib.rs +++ b/network-rpc/api/src/lib.rs @@ -297,7 +297,7 @@ pub trait NetworkRpc: Sized + Send + Sync + 'static { &self, peer_id: PeerId, req: dag_protocol::GetTargetDagAccumulatorLeafDetail, - ) -> BoxFuture>>>; + ) -> BoxFuture>>; fn get_dag_block_info( &self, peer_id: PeerId, diff --git a/network-rpc/src/rpc.rs b/network-rpc/src/rpc.rs index e67c0578b5..5c80f47d9d 100644 --- a/network-rpc/src/rpc.rs +++ b/network-rpc/src/rpc.rs @@ -313,18 +313,18 @@ impl gen_server::NetworkRpc for NetworkRpcImpl { req: dag_protocol::GetDagAccumulatorLeaves, ) -> BoxFuture>> { let chain_service = self.chain_service.clone(); - let fut = async move { - chain_service.get_dag_accumulator_leaves(req).await - }; + let fut = async move { chain_service.get_dag_accumulator_leaves(req).await }; Box::pin(fut) } fn get_accumulator_leaf_detail( &self, _peer_id: PeerId, - _req: dag_protocol::GetTargetDagAccumulatorLeafDetail, - ) -> BoxFuture>>> { - todo!() + req: dag_protocol::GetTargetDagAccumulatorLeafDetail, + ) -> BoxFuture>> { + let chain_service = self.chain_service.clone(); + let fut = async move { chain_service.get_dag_accumulator_leaves_detail(req).await }; + Box::pin(fut) } fn get_dag_block_info( From 9568ba7df98e96dfe3318ab466f24e2228587e09 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 27 Jul 2023 
10:48:23 +0800 Subject: [PATCH 18/30] add verified client for dag --- sync/src/verified_rpc_client.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/sync/src/verified_rpc_client.rs b/sync/src/verified_rpc_client.rs index fc4bc6f8f5..8f28b79eaa 100644 --- a/sync/src/verified_rpc_client.rs +++ b/sync/src/verified_rpc_client.rs @@ -10,6 +10,7 @@ use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::AccumulatorNode; use starcoin_crypto::hash::HashValue; use starcoin_logger::prelude::*; +use starcoin_network_rpc_api::dag_protocol; use starcoin_network_rpc_api::{ gen_client::NetworkRpcClient, BlockBody, GetAccumulatorNodeByNodeHash, GetBlockHeadersByNumber, GetBlockIds, GetTxnsWithHash, RawRpcClient, @@ -411,4 +412,27 @@ impl VerifiedRpcClient { }) .collect()) } + + pub async fn get_dag_accumulator_leaves( + &self, + req: dag_protocol::GetDagAccumulatorLeaves, + ) -> Result> { + let peer_id = self.select_a_peer()?; + self.client.get_dag_accumulator_leaves (peer_id, req).await + } + + pub async fn get_accumulator_leaf_detail( + &self, + req: dag_protocol::GetTargetDagAccumulatorLeafDetail, + ) -> Result> { + let peer_id = self.select_a_peer()?; + self.client.get_accumulator_leaf_detail(peer_id, req).await + } + + pub async fn get_dag_block_info( + &self, + _req: dag_protocol::GetSyncDagBlockInfo, + ) -> Result>> { + todo!() + } } From ae324252b8ea978f4f5ee73f47c962317aa60148 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 27 Jul 2023 11:04:03 +0800 Subject: [PATCH 19/30] add sync task for getting the leaves of an accumulator --- sync/src/tasks/mod.rs | 2 + sync/src/tasks/sync_dag_protocol_trait.rs | 27 ++++++ sync/src/tasks/sync_find_ancestor_task.rs | 113 ++++++++++++++++++++++ 3 files changed, 142 insertions(+) create mode 100644 sync/src/tasks/sync_dag_protocol_trait.rs create mode 100644 sync/src/tasks/sync_find_ancestor_task.rs diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index 1ed2424924..e49b24909d 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -506,6 +506,8 @@ mod accumulator_sync_task; mod block_sync_task; mod find_ancestor_task; mod inner_sync_task; +mod sync_dag_protocol_trait; +mod sync_find_ancestor_task; #[cfg(test)] pub(crate) mod mock; #[cfg(test)] diff --git a/sync/src/tasks/sync_dag_protocol_trait.rs b/sync/src/tasks/sync_dag_protocol_trait.rs new file mode 100644 index 0000000000..9eb7533075 --- /dev/null +++ b/sync/src/tasks/sync_dag_protocol_trait.rs @@ -0,0 +1,27 @@ +use anyhow::Result; +use futures::future::BoxFuture; +use network_p2p_core::PeerId; +use starcoin_network_rpc_api::dag_protocol::{TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail, SyncDagBlockInfo}; + +pub trait PeerSynDagAccumulator: Send + Sync { + fn get_sync_dag_asccumulator_leaves( + &self, + peer_id: Option, + leaf_index: u64, + batch_size: u64, + ) -> BoxFuture>>; + + fn get_accumulator_leaf_detail( + &self, + peer_id: Option, + leaf_index: u64, + batch_size: u64, + ) -> BoxFuture>>>; + + fn get_dag_block_info( + &self, + peer: Option, + leaf_index: u64, + batch_size: u64, + ) -> BoxFuture>>>; +} diff --git a/sync/src/tasks/sync_find_ancestor_task.rs b/sync/src/tasks/sync_find_ancestor_task.rs new file mode 100644 index 0000000000..e2be9e71dc --- /dev/null +++ b/sync/src/tasks/sync_find_ancestor_task.rs @@ -0,0 +1,113 @@ +use anyhow::{format_err, Result}; +use futures::{FutureExt, future::BoxFuture}; +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; +use 
starcoin_network_rpc_api::dag_protocol::TargetDagAccumulatorLeaf; +use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, storage::CodecKVStore}; +use std::sync::Arc; +use stream_task::{CollectorState, TaskResultCollector, TaskState}; + +use super::sync_dag_protocol_trait::PeerSynDagAccumulator; + +#[derive(Clone)] +pub struct FindAncestorTask { + start_leaf_number: u64, + fetcher: Arc, + batch_size: u64, +} +impl FindAncestorTask { + pub(crate) fn new( + current_leaf_numeber: u64, + target_leaf_numeber: u64, + fetcher: F, + ) -> Self where F: PeerSynDagAccumulator + 'static { + FindAncestorTask { + start_leaf_number: std::cmp::min(current_leaf_numeber, target_leaf_numeber), + fetcher: Arc::new(fetcher), + batch_size: 3, + } + } +} + +impl TaskState for FindAncestorTask { + type Item = TargetDagAccumulatorLeaf; + + fn new_sub_task(self) -> BoxFuture<'static, Result>> { + async move { + let current_number = self.start_leaf_number; + let target_accumulator_leaves = self + .fetcher + .get_sync_dag_asccumulator_leaves(None, self.start_leaf_number, self.batch_size) + .await?; + Ok(target_accumulator_leaves) + } + .boxed() + } + + fn next(&self) -> Option { + //this should never happen, because all node's genesis block should same. + if self.start_leaf_number == 0 { + return None; + } + + let next_number = self.start_leaf_number.saturating_sub(self.batch_size); + Some(Self { + start_leaf_number: next_number, + batch_size: self.batch_size, + fetcher: self.fetcher.clone(), + }) + } +} + +pub struct AncestorCollector { + accumulator: Arc, + ancestor: Option, + accumulator_snapshot: Arc, +} + +impl AncestorCollector { + pub fn new( + accumulator: Arc, + accumulator_snapshot: Arc, + ) -> Self { + Self { + accumulator, + ancestor: None, + accumulator_snapshot, + } + } +} + +impl TaskResultCollector for AncestorCollector { + type Output = AccumulatorInfo; + + fn collect(&mut self, item: TargetDagAccumulatorLeaf) -> anyhow::Result { + if self.ancestor.is_some() { + return Ok(CollectorState::Enough); + } + + let accumulator_leaf = self.accumulator.get_leaf(item.leaf_index)?.ok_or_else(|| { + format_err!( + "Cannot find accumulator leaf by number: {}", + item.leaf_index + ) + })?; + + let accumulator_info = match self.accumulator_snapshot.get(accumulator_leaf)? 
{ + Some(snapshot) => snapshot.accumulator_info, + None => panic!("failed to get the snapshot, it is none."), + }; + + if item.accumulator_root == accumulator_info.accumulator_root { + self.ancestor = Some(accumulator_info); + return anyhow::Result::Ok(CollectorState::Enough); + } else { + Ok(CollectorState::Need) + } + } + + fn finish(mut self) -> Result { + self.ancestor + .take() + .ok_or_else(|| format_err!("Unexpect state, collector finished by ancestor is None")) + } +} From 7a448cf6227c41b445927a0a24c9c4d092390efd Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 27 Jul 2023 11:15:14 +0800 Subject: [PATCH 20/30] add sync dag accumulator task --- sync/src/tasks/mod.rs | 1 + sync/src/tasks/sync_dag_accumulator_task.rs | 154 ++++++++++++++++++++ 2 files changed, 155 insertions(+) create mode 100644 sync/src/tasks/sync_dag_accumulator_task.rs diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index e49b24909d..7299235c8d 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -508,6 +508,7 @@ mod find_ancestor_task; mod inner_sync_task; mod sync_dag_protocol_trait; mod sync_find_ancestor_task; +mod sync_dag_accumulator_task; #[cfg(test)] pub(crate) mod mock; #[cfg(test)] diff --git a/sync/src/tasks/sync_dag_accumulator_task.rs b/sync/src/tasks/sync_dag_accumulator_task.rs new file mode 100644 index 0000000000..116705b778 --- /dev/null +++ b/sync/src/tasks/sync_dag_accumulator_task.rs @@ -0,0 +1,154 @@ +use anyhow::{bail, ensure, format_err, Result}; +use bcs_ext::BCSCodec; +use futures::{FutureExt, future::BoxFuture}; +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; +use starcoin_crypto::HashValue; +use starcoin_network_rpc_api::dag_protocol::TargetDagAccumulatorLeafDetail; +use starcoin_storage::{ + flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage}, + storage::CodecKVStore, +}; +use std::sync::Arc; +use stream_task::{CollectorState, TaskResultCollector, TaskState}; + +use super::sync_dag_protocol_trait::PeerSynDagAccumulator; + +#[derive(Clone)] +pub struct SyncDagAccumulatorTask { + leaf_index: u64, + batch_size: u64, + target_index: u64, + fetcher: Arc, +} +impl SyncDagAccumulatorTask { + pub fn new( + leaf_index: u64, + batch_size: u64, + target_index: u64, + fetcher: F, + ) -> Self where F: PeerSynDagAccumulator + 'static { + SyncDagAccumulatorTask { + leaf_index, + batch_size, + target_index, + fetcher: Arc::new(fetcher), + } + } +} + +impl TaskState for SyncDagAccumulatorTask { + type Item = TargetDagAccumulatorLeafDetail; + + fn new_sub_task(self) -> BoxFuture<'static, Result>> { + async move { + let target_details = match self + .fetcher + .get_accumulator_leaf_detail(None, self.leaf_index, self.batch_size) + .await? + { + Some(details) => details, + None => { + bail!("return None when sync accumulator for dag"); + } + }; + Ok(target_details) + } + .boxed() + } + + fn next(&self) -> Option { + //this should never happen, because all node's genesis block should same. 
+ if self.leaf_index == 0 { + // it is genesis + return None; + } + + let next_number = self.leaf_index.saturating_add(self.batch_size); + if next_number > self.target_index - 1 { // genesis leaf doesn't need synchronization + return None; + } + Some(Self { + fetcher: self.fetcher.clone(), + leaf_index: next_number, + batch_size: self.batch_size, + target_index: self.target_index, + }) + } +} + +pub struct SyncDagAccumulatorCollector { + accumulator: MerkleAccumulator, + accumulator_snapshot: Arc, + target: AccumulatorInfo, + start_leaf_index: u64, +} + +impl SyncDagAccumulatorCollector { + pub fn new( + accumulator: MerkleAccumulator, + accumulator_snapshot: Arc, + target: AccumulatorInfo, + start_leaf_index: u64, + ) -> Self { + Self { + accumulator, + accumulator_snapshot, + target, + start_leaf_index, + } + } +} + +impl TaskResultCollector for SyncDagAccumulatorCollector { + type Output = (u64, MerkleAccumulator); + + fn collect(&mut self, mut item: TargetDagAccumulatorLeafDetail) -> anyhow::Result { + item.relationship_pair.sort(); + let accumulator_leaf = HashValue::sha3_256_of( + &item.relationship_pair + .encode() + .expect("encoding the sorted relatship set must be successful"), + ); + self.accumulator.append(&[accumulator_leaf])?; + println!("item: {}", item.relationship_pair.len()); + + let accumulator_info = self.accumulator.get_info(); + if accumulator_info.accumulator_root != item.accumulator_root { + bail!("sync occurs error for the accumulator root differs from other!, local {}, peer {}", accumulator_info.accumulator_root, item.accumulator_root) + } + self.accumulator.flush()?; + + let num_leaves = accumulator_info.num_leaves; + self.accumulator_snapshot.put( + accumulator_leaf, + SyncFlexiDagSnapshot { + child_hashes: item + .relationship_pair + .into_iter() + .map(|pair| pair.child) + .collect::>(), + accumulator_info, + }, + )?; + + if num_leaves == self.target.num_leaves { + Ok(CollectorState::Enough) + } else { + Ok(CollectorState::Need) + } + } + + fn finish(self) -> Result { + let accumulator_info = self.accumulator.get_info(); + + ensure!( + accumulator_info == self.target, + "local accumulator info: {:?}, peer's: {:?}", + accumulator_info, + self.target + ); + println!("finish to sync accumulator, its info is: {:?}", accumulator_info); + + Ok((self.start_leaf_index, self.accumulator)) + } +} From 6088b6040d500dc46677d513bc814e365e6e3d94 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 27 Jul 2023 11:25:15 +0800 Subject: [PATCH 21/30] add dag block sync task --- sync/src/tasks/mod.rs | 7 +- sync/src/tasks/sync_dag_accumulator_task.rs | 34 ++++--- sync/src/tasks/sync_dag_block_task.rs | 102 ++++++++++++++++++++ sync/src/tasks/sync_dag_protocol_trait.rs | 4 +- sync/src/tasks/sync_find_ancestor_task.rs | 11 +-- sync/src/verified_rpc_client.rs | 2 +- 6 files changed, 137 insertions(+), 23 deletions(-) create mode 100644 sync/src/tasks/sync_dag_block_task.rs diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index 7299235c8d..2d6521a0e7 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -506,11 +506,12 @@ mod accumulator_sync_task; mod block_sync_task; mod find_ancestor_task; mod inner_sync_task; -mod sync_dag_protocol_trait; -mod sync_find_ancestor_task; -mod sync_dag_accumulator_task; #[cfg(test)] pub(crate) mod mock; +mod sync_dag_accumulator_task; +mod sync_dag_block_task; +mod sync_dag_protocol_trait; +mod sync_find_ancestor_task; #[cfg(test)] mod tests; diff --git a/sync/src/tasks/sync_dag_accumulator_task.rs 
b/sync/src/tasks/sync_dag_accumulator_task.rs index 116705b778..9baedd3607 100644 --- a/sync/src/tasks/sync_dag_accumulator_task.rs +++ b/sync/src/tasks/sync_dag_accumulator_task.rs @@ -1,6 +1,6 @@ use anyhow::{bail, ensure, format_err, Result}; use bcs_ext::BCSCodec; -use futures::{FutureExt, future::BoxFuture}; +use futures::{future::BoxFuture, FutureExt}; use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; use starcoin_crypto::HashValue; use starcoin_network_rpc_api::dag_protocol::TargetDagAccumulatorLeafDetail; @@ -21,12 +21,10 @@ pub struct SyncDagAccumulatorTask { fetcher: Arc, } impl SyncDagAccumulatorTask { - pub fn new( - leaf_index: u64, - batch_size: u64, - target_index: u64, - fetcher: F, - ) -> Self where F: PeerSynDagAccumulator + 'static { + pub fn new(leaf_index: u64, batch_size: u64, target_index: u64, fetcher: F) -> Self + where + F: PeerSynDagAccumulator + 'static, + { SyncDagAccumulatorTask { leaf_index, batch_size, @@ -64,7 +62,8 @@ impl TaskState for SyncDagAccumulatorTask { } let next_number = self.leaf_index.saturating_add(self.batch_size); - if next_number > self.target_index - 1 { // genesis leaf doesn't need synchronization + if next_number > self.target_index - 1 { + // genesis leaf doesn't need synchronization return None; } Some(Self { @@ -102,10 +101,14 @@ impl SyncDagAccumulatorCollector { impl TaskResultCollector for SyncDagAccumulatorCollector { type Output = (u64, MerkleAccumulator); - fn collect(&mut self, mut item: TargetDagAccumulatorLeafDetail) -> anyhow::Result { + fn collect( + &mut self, + mut item: TargetDagAccumulatorLeafDetail, + ) -> anyhow::Result { item.relationship_pair.sort(); let accumulator_leaf = HashValue::sha3_256_of( - &item.relationship_pair + &item + .relationship_pair .encode() .expect("encoding the sorted relatship set must be successful"), ); @@ -114,7 +117,11 @@ impl TaskResultCollector for SyncDagAccumulatorC let accumulator_info = self.accumulator.get_info(); if accumulator_info.accumulator_root != item.accumulator_root { - bail!("sync occurs error for the accumulator root differs from other!, local {}, peer {}", accumulator_info.accumulator_root, item.accumulator_root) + bail!( + "sync occurs error for the accumulator root differs from other!, local {}, peer {}", + accumulator_info.accumulator_root, + item.accumulator_root + ) } self.accumulator.flush()?; @@ -147,7 +154,10 @@ impl TaskResultCollector for SyncDagAccumulatorC accumulator_info, self.target ); - println!("finish to sync accumulator, its info is: {:?}", accumulator_info); + println!( + "finish to sync accumulator, its info is: {:?}", + accumulator_info + ); Ok((self.start_leaf_index, self.accumulator)) } diff --git a/sync/src/tasks/sync_dag_block_task.rs b/sync/src/tasks/sync_dag_block_task.rs new file mode 100644 index 0000000000..6ff0ec6801 --- /dev/null +++ b/sync/src/tasks/sync_dag_block_task.rs @@ -0,0 +1,102 @@ +use anyhow::{bail, ensure, format_err, Ok, Result}; +use bcs_ext::BCSCodec; +use futures::{future::BoxFuture, FutureExt}; +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; +use starcoin_crypto::HashValue; +use starcoin_network_rpc_api::dag_protocol::SyncDagBlockInfo; +use starcoin_storage::{ + flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage}, + storage::CodecKVStore, +}; +use std::sync::Arc; +use stream_task::{CollectorState, TaskResultCollector, TaskState}; + +use super::sync_dag_protocol_trait::PeerSynDagAccumulator; + +#[derive(Clone)] +pub 
struct SyncDagBlockTask { + accumulator: Arc, + start_index: u64, + batch_size: u64, + target: AccumulatorInfo, + fetcher: Arc, +} +impl SyncDagBlockTask { + pub fn new( + accumulator: Arc, + start_index: u64, + batch_size: u64, + target: AccumulatorInfo, + fetcher: F, + ) -> Self + where + F: PeerSynDagAccumulator + 'static, + { + SyncDagBlockTask { + accumulator, + start_index, + batch_size, + target, + fetcher: Arc::new(fetcher), + } + } +} + +impl TaskState for SyncDagBlockTask { + type Item = SyncDagBlockInfo; + + fn new_sub_task(self) -> BoxFuture<'static, Result>> { + async move { + let dag_info: Vec = match self + .fetcher + .get_dag_block_info(None, self.start_index, self.batch_size) + .await + { + anyhow::Result::Ok(result) => result.unwrap_or_else(|| { + println!("failed to get the sync dag block info, result is None"); + [].to_vec() + }), + Err(error) => { + println!("failed to get the sync dag block info, error: {:?}", error); + [].to_vec() + } + }; + Ok(dag_info) + } + .boxed() + } + + fn next(&self) -> Option { + let next_number = self.start_index.saturating_add(self.batch_size); + if next_number > self.target.num_leaves { + return None; + } + Some(Self { + accumulator: self.accumulator.clone(), + start_index: next_number, + batch_size: self.batch_size, + target: self.target.clone(), + fetcher: self.fetcher.clone(), + }) + } +} + +pub struct SyncDagBlockCollector {} + +impl SyncDagBlockCollector { + pub fn new() -> Self { + Self {} + } +} + +impl TaskResultCollector for SyncDagBlockCollector { + type Output = (); + + fn collect(&mut self, mut item: SyncDagBlockInfo) -> anyhow::Result { + Ok(CollectorState::Enough) + } + + fn finish(self) -> Result { + Ok(()) + } +} diff --git a/sync/src/tasks/sync_dag_protocol_trait.rs b/sync/src/tasks/sync_dag_protocol_trait.rs index 9eb7533075..78b2093c7a 100644 --- a/sync/src/tasks/sync_dag_protocol_trait.rs +++ b/sync/src/tasks/sync_dag_protocol_trait.rs @@ -1,7 +1,9 @@ use anyhow::Result; use futures::future::BoxFuture; use network_p2p_core::PeerId; -use starcoin_network_rpc_api::dag_protocol::{TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail, SyncDagBlockInfo}; +use starcoin_network_rpc_api::dag_protocol::{ + SyncDagBlockInfo, TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail, +}; pub trait PeerSynDagAccumulator: Send + Sync { fn get_sync_dag_asccumulator_leaves( diff --git a/sync/src/tasks/sync_find_ancestor_task.rs b/sync/src/tasks/sync_find_ancestor_task.rs index e2be9e71dc..fbf648152e 100644 --- a/sync/src/tasks/sync_find_ancestor_task.rs +++ b/sync/src/tasks/sync_find_ancestor_task.rs @@ -1,5 +1,5 @@ use anyhow::{format_err, Result}; -use futures::{FutureExt, future::BoxFuture}; +use futures::{future::BoxFuture, FutureExt}; use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; use starcoin_network_rpc_api::dag_protocol::TargetDagAccumulatorLeaf; use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, storage::CodecKVStore}; @@ -15,11 +15,10 @@ pub struct FindAncestorTask { batch_size: u64, } impl FindAncestorTask { - pub(crate) fn new( - current_leaf_numeber: u64, - target_leaf_numeber: u64, - fetcher: F, - ) -> Self where F: PeerSynDagAccumulator + 'static { + pub(crate) fn new(current_leaf_numeber: u64, target_leaf_numeber: u64, fetcher: F) -> Self + where + F: PeerSynDagAccumulator + 'static, + { FindAncestorTask { start_leaf_number: std::cmp::min(current_leaf_numeber, target_leaf_numeber), fetcher: Arc::new(fetcher), diff --git a/sync/src/verified_rpc_client.rs 
b/sync/src/verified_rpc_client.rs index 8f28b79eaa..ff4fc0d5d4 100644 --- a/sync/src/verified_rpc_client.rs +++ b/sync/src/verified_rpc_client.rs @@ -418,7 +418,7 @@ impl VerifiedRpcClient { req: dag_protocol::GetDagAccumulatorLeaves, ) -> Result> { let peer_id = self.select_a_peer()?; - self.client.get_dag_accumulator_leaves (peer_id, req).await + self.client.get_dag_accumulator_leaves(peer_id, req).await } pub async fn get_accumulator_leaf_detail( From b740bef2f45753bafeabbb99b251fe4aa9c43ade Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 27 Jul 2023 18:52:48 +0800 Subject: [PATCH 22/30] add chain info v2 including original chain info --- cmd/peer-watcher/src/lib.rs | 4 +- cmd/peer-watcher/src/main.rs | 8 +-- genesis/src/lib.rs | 12 ++-- network/api/src/tests.rs | 10 ++-- network/src/network_p2p_handle.rs | 18 +++--- network/src/service.rs | 22 +++---- network/src/worker.rs | 6 +- network/types/src/peer_info.rs | 24 ++++---- rpc/api/src/types.rs | 2 +- storage/src/lib.rs | 39 +++++++++++-- sync/src/sync.rs | 7 ++- sync/src/tasks/mod.rs | 28 +++++++-- sync/src/tasks/sync_dag_accumulator_task.rs | 2 +- sync/src/tasks/sync_dag_block_task.rs | 6 +- test-helper/src/network.rs | 14 +++-- types/src/startup_info.rs | 63 +++++++++++++++++++++ 16 files changed, 192 insertions(+), 73 deletions(-) diff --git a/cmd/peer-watcher/src/lib.rs b/cmd/peer-watcher/src/lib.rs index 0defa9ba3e..649845b6da 100644 --- a/cmd/peer-watcher/src/lib.rs +++ b/cmd/peer-watcher/src/lib.rs @@ -9,6 +9,7 @@ use starcoin_network::network_p2p_handle::Networkp2pHandle; use starcoin_network::{build_network_worker, NotificationMessage}; use starcoin_storage::storage::StorageInstance; use starcoin_storage::Storage; +use starcoin_types::startup_info::ChainInfoV2; use std::sync::Arc; pub fn build_lighting_network( @@ -18,9 +19,10 @@ pub fn build_lighting_network( let genesis = starcoin_genesis::Genesis::load_or_build(net)?; let storage = Arc::new(Storage::new(StorageInstance::new_cache_instance())?); let chain_info = genesis.execute_genesis_block(net, storage)?; + let chain_info_v2 = ChainInfoV2::new(chain_info.chain_id(), chain_info.genesis_hash(), chain_info.status().clone()); build_network_worker( network_config, - chain_info, + chain_info_v2, NotificationMessage::protocols(), None, None, diff --git a/cmd/peer-watcher/src/main.rs b/cmd/peer-watcher/src/main.rs index 7d455f311a..fd2fc5b639 100644 --- a/cmd/peer-watcher/src/main.rs +++ b/cmd/peer-watcher/src/main.rs @@ -8,7 +8,7 @@ use network_p2p::Event; use network_types::peer_info::PeerInfo; use starcoin_config::{NodeConfig, StarcoinOpt}; use starcoin_peer_watcher::build_lighting_network; -use starcoin_types::startup_info::ChainInfo; +use starcoin_types::startup_info::ChainInfoV2; /// A lighting node, connect to peer to peer network, and monitor peers. fn main() { @@ -31,10 +31,10 @@ fn main() { notif_protocols, rpc_protocols, version_string, - } => match ChainInfo::decode(&generic_data) { - Ok(chain_info) => Some(PeerInfo::new( + } => match ChainInfoV2::decode(&generic_data) { + Ok(chain_info_v2) => Some(PeerInfo::new( remote.into(), - chain_info, + chain_info_v2, notif_protocols, rpc_protocols, version_string, diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index 18df6cfa6b..6177a8ebc9 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -253,9 +253,9 @@ impl Genesis { )?; let startup_info = StartupInfo::new(genesis_chain.current_header().id()); storage.save_startup_info(startup_info)?; - storage + Ok(storage .get_chain_info()? 
- .ok_or_else(|| format_err!("ChainInfo should exist after genesis block executed.")) + .ok_or_else(|| format_err!("ChainInfo should exist after genesis block executed."))?.chain_info) } pub fn save

(&self, data_dir: P) -> Result<()> @@ -308,13 +308,13 @@ impl Genesis { ) -> Result<(ChainInfo, Genesis)> { debug!("load startup_info."); let (chain_info, genesis) = match storage.get_chain_info() { - Ok(Some(chain_info)) => { - debug!("Get chain info {:?} from db", chain_info); + Ok(Some(chain_info_v2)) => { + debug!("Get chain info {:?} from db", chain_info_v2); info!("Check genesis file."); let genesis = Self::load_and_check_genesis(net, data_dir, false)?; match storage.get_block(genesis.block().header().id()) { Ok(Some(block)) => { - if *genesis.block() == block && chain_info.genesis_hash() == block.id() { + if *genesis.block() == block && chain_info_v2.chain_info.genesis_hash() == block.id() { info!("Check genesis db block ok!"); } else { return Err(GenesisError::GenesisVersionMismatch { @@ -329,7 +329,7 @@ impl Genesis { } Err(e) => return Err(GenesisError::GenesisLoadFailure(e).into()), } - (chain_info, genesis) + (chain_info_v2.chain_info, genesis) } Ok(None) => { let genesis = Self::load_and_check_genesis(net, data_dir, true)?; diff --git a/network/api/src/tests.rs b/network/api/src/tests.rs index 801277064e..423653b45c 100644 --- a/network/api/src/tests.rs +++ b/network/api/src/tests.rs @@ -7,7 +7,7 @@ use network_p2p_types::peer_id::PeerId; use network_types::peer_info::PeerInfo; use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; -use starcoin_types::startup_info::{ChainInfo, ChainStatus}; +use starcoin_types::startup_info::{ChainInfoV2, ChainStatus}; use starcoin_types::U256; #[test] @@ -34,28 +34,28 @@ fn test_peer_selector() { let peers = vec![ PeerInfo::new( PeerId::random(), - ChainInfo::new(1.into(), HashValue::zero(), mock_chain_status(100.into())), + ChainInfoV2::new(1.into(), HashValue::zero(), mock_chain_status(100.into())), vec![], vec![], None, ), PeerInfo::new( PeerId::random(), - ChainInfo::new(1.into(), HashValue::zero(), mock_chain_status(99.into())), + ChainInfoV2::new(1.into(), HashValue::zero(), mock_chain_status(99.into())), vec![], vec![], None, ), PeerInfo::new( PeerId::random(), - ChainInfo::new(1.into(), HashValue::zero(), mock_chain_status(100.into())), + ChainInfoV2::new(1.into(), HashValue::zero(), mock_chain_status(100.into())), vec![], vec![], None, ), PeerInfo::new( PeerId::random(), - ChainInfo::new(1.into(), HashValue::zero(), mock_chain_status(1.into())), + ChainInfoV2::new(1.into(), HashValue::zero(), mock_chain_status(1.into())), vec![], vec![], None, diff --git a/network/src/network_p2p_handle.rs b/network/src/network_p2p_handle.rs index 0c58124c82..197aea58c1 100644 --- a/network/src/network_p2p_handle.rs +++ b/network/src/network_p2p_handle.rs @@ -10,10 +10,10 @@ use network_p2p::business_layer_handle::HandshakeResult; use network_p2p::{business_layer_handle::BusinessLayerHandle, protocol::rep, PeerId}; use sc_peerset::ReputationChange; use serde::{Deserialize, Serialize}; -use starcoin_types::startup_info::{ChainInfo, ChainStatus}; +use starcoin_types::startup_info::{ChainInfoV2, ChainStatus}; /// Current protocol version. -pub(crate) const CURRENT_VERSION: u32 = 5; +pub(crate) const CURRENT_VERSION: u32 = 6; /// Lowest version we support pub(crate) const MIN_VERSION: u32 = 3; @@ -29,7 +29,7 @@ pub struct Status { /// Tell other peer which rpc api we support. 
pub rpc_protocols: Vec>, /// the generic data related to the peer - pub info: ChainInfo, + pub info: ChainInfoV2, } pub struct Networkp2pHandle { @@ -37,7 +37,7 @@ pub struct Networkp2pHandle { } impl Networkp2pHandle { - pub fn new(chain_info: ChainInfo) -> Self { + pub fn new(chain_info: ChainInfoV2) -> Self { let status = Status { version: CURRENT_VERSION, min_supported_version: MIN_VERSION, @@ -56,13 +56,13 @@ impl Networkp2pHandle { status: Status, ) -> Result { debug!(target: "network-p2p", "New peer {} {:?}", who, status); - if status.info.genesis_hash() != self.status.info.genesis_hash() { + if status.info.chain_info.genesis_hash() != self.status.info.chain_info.genesis_hash() { error!( target: "network-p2p", "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", who, - self.status.info.genesis_hash(), - status.info.genesis_hash(), + self.status.info.chain_info.genesis_hash(), + status.info.chain_info.genesis_hash(), ); return Err(rep::GENESIS_MISMATCH); } @@ -108,7 +108,7 @@ impl BusinessLayerHandle for Networkp2pHandle { } fn update_generic_data(&mut self, peer_info: &[u8]) -> Result<(), anyhow::Error> { - match ChainInfo::decode(peer_info) { + match ChainInfoV2::decode(peer_info) { std::result::Result::Ok(other_chain_info) => { self.status.info = other_chain_info; Ok(()) @@ -125,7 +125,7 @@ impl BusinessLayerHandle for Networkp2pHandle { fn update_status(&mut self, peer_status: &[u8]) -> Result<(), anyhow::Error> { match ChainStatus::decode(peer_status) { std::result::Result::Ok(status) => { - self.status.info.update_status(status); + self.status.info.chain_info.update_status(status); Ok(()) } Err(error) => { diff --git a/network/src/service.rs b/network/src/service.rs index 800de2f9e6..3d4519a2d6 100644 --- a/network/src/service.rs +++ b/network/src/service.rs @@ -30,7 +30,7 @@ use starcoin_service_registry::{ ActorService, EventHandler, ServiceContext, ServiceHandler, ServiceRef, ServiceRequest, }; use starcoin_txpool_api::PropagateTransactions; -use starcoin_types::startup_info::{ChainInfo, ChainStatus}; +use starcoin_types::startup_info::{ChainInfoV2, ChainStatus}; use starcoin_types::sync_status::SyncStatus; use starcoin_types::system_events::SyncStatusChangeEvent; use std::borrow::Cow; @@ -56,7 +56,7 @@ impl NetworkActor for NetworkActorService {} impl NetworkActorService { pub fn new( config: Arc, - chain_info: ChainInfo, + chain_info_v2: ChainInfoV2, rpc: Option<(RpcInfo, ServiceRef)>, peer_message_handler: H, ) -> Result @@ -65,7 +65,7 @@ impl NetworkActorService { { let (self_info, worker) = build_network_worker( &config.network, - chain_info, + chain_info_v2, config.network.supported_network_protocols(), rpc, config.metrics.registry().cloned(), @@ -186,11 +186,11 @@ impl EventHandler for NetworkActorService { "Connected peer {:?}, protocol: {}, notif_protocols: {:?}, rpc_protocols: {:?}", remote, protocol, notif_protocols, rpc_protocols ); - let info = match ChainInfo::decode(&generic_data) { + let info = match ChainInfoV2::decode(&generic_data) { Ok(data) => data, Err(_) => return, }; - if info.chain_id().is_barnard() { + if info.chain_info.chain_id().is_barnard() { info!("Connected peer ver_string {:?}", version_string); if let Some(ref ver_str) = version_string { if !ver_str.contains(BARNARD_HARD_FORK_PEER_VERSION_STRING_PREFIX) @@ -205,7 +205,7 @@ impl EventHandler for NetworkActorService { } } } - let peer_event = PeerEvent::Open(remote.into(), Box::new(info.clone())); + let peer_event = PeerEvent::Open(remote.into(), 
Box::new(info.chain_info.clone())); self.inner.on_peer_connected( remote.into(), info, @@ -626,7 +626,7 @@ impl Inner { pub(crate) fn on_peer_connected( &mut self, peer_id: PeerId, - chain_info: ChainInfo, + chain_info_v2: ChainInfoV2, notif_protocols: Vec>, rpc_protocols: Vec>, version_string: Option, @@ -637,17 +637,17 @@ impl Inner { // avoid update chain status to old // this many happend when multi protocol send repeat handhake. //FIXME after PeerEvent refactor. - if chain_info.total_difficulty() - > peer.peer_info.chain_info.status().info.total_difficulty + if chain_info_v2.chain_info.total_difficulty() + > peer.peer_info.chain_info_v2.chain_info.status().info.total_difficulty { peer.peer_info - .update_chain_status(chain_info.status().clone()); + .update_chain_status(chain_info_v2.chain_info.status().clone()); } }) .or_insert_with(|| { Peer::new(PeerInfo::new( peer_id, - chain_info, + chain_info_v2, notif_protocols, rpc_protocols, version_string, diff --git a/network/src/worker.rs b/network/src/worker.rs index c59558bde6..529da91e95 100644 --- a/network/src/worker.rs +++ b/network/src/worker.rs @@ -16,7 +16,7 @@ use starcoin_config::NetworkConfig; use starcoin_metrics::Registry; use starcoin_network_rpc::NetworkRpcService; use starcoin_service_registry::ServiceRef; -use starcoin_types::startup_info::ChainInfo; +use starcoin_types::startup_info::ChainInfoV2; use std::borrow::Cow; use crate::network_p2p_handle::Networkp2pHandle; @@ -28,7 +28,7 @@ pub const RPC_PROTOCOL_PREFIX: &str = RpcInfo::RPC_PROTOCOL_PREFIX; pub fn build_network_worker( network_config: &NetworkConfig, - chain_info: ChainInfo, + chain_info: ChainInfoV2, protocols: Vec>, rpc_service: Option<(RpcInfo, ServiceRef)>, metrics_registry: Option, @@ -107,7 +107,7 @@ pub fn build_network_worker( ..NetworkConfiguration::default() }; // protocol id is chain/{chain_id}, `RegisteredProtocol` will append `/starcoin` prefix - let protocol_id = ProtocolId::from(format!("chain/{}", chain_info.chain_id()).as_str()); + let protocol_id = ProtocolId::from(format!("chain/{}", chain_info.chain_info.chain_id()).as_str()); debug!("Init network worker with config: {:?}", config); let worker: NetworkWorker = NetworkWorker::new(Params::new( diff --git a/network/types/src/peer_info.rs b/network/types/src/peer_info.rs index 1ab7bdd70e..8e591a70e0 100644 --- a/network/types/src/peer_info.rs +++ b/network/types/src/peer_info.rs @@ -9,14 +9,14 @@ use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue; use starcoin_types::block::BlockHeader; use starcoin_types::block::BlockNumber; -use starcoin_types::startup_info::{ChainInfo, ChainStatus}; +use starcoin_types::startup_info::{ChainInfoV2, ChainStatus}; use starcoin_types::U256; use std::borrow::Cow; #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)] pub struct PeerInfo { pub peer_id: PeerId, - pub chain_info: ChainInfo, + pub chain_info_v2: ChainInfoV2, pub notif_protocols: Vec>, pub rpc_protocols: Vec>, pub version_string: Option, @@ -25,14 +25,14 @@ pub struct PeerInfo { impl PeerInfo { pub fn new( peer_id: PeerId, - chain_info: ChainInfo, + chain_info_v2: ChainInfoV2, notif_protocols: Vec>, rpc_protocols: Vec>, version_string: Option, ) -> Self { Self { peer_id, - chain_info, + chain_info_v2, notif_protocols, rpc_protocols, version_string, @@ -43,28 +43,28 @@ impl PeerInfo { self.peer_id.clone() } - pub fn chain_info(&self) -> &ChainInfo { - &self.chain_info + pub fn chain_info(&self) -> &ChainInfoV2 { + &self.chain_info_v2 } pub fn block_number(&self) -> 
BlockNumber { - self.chain_info.head().number() + self.chain_info_v2.chain_info.head().number() } pub fn latest_header(&self) -> &BlockHeader { - self.chain_info.head() + self.chain_info_v2.chain_info.head() } pub fn block_id(&self) -> HashValue { - self.chain_info.head().id() + self.chain_info_v2.chain_info.head().id() } pub fn total_difficulty(&self) -> U256 { - self.chain_info.total_difficulty() + self.chain_info_v2.chain_info.total_difficulty() } pub fn update_chain_status(&mut self, chain_status: ChainStatus) { - self.chain_info.update_status(chain_status) + self.chain_info_v2.chain_info.update_status(chain_status) } /// This peer is support notification @@ -96,7 +96,7 @@ impl PeerInfo { pub fn random() -> Self { Self { peer_id: PeerId::random(), - chain_info: ChainInfo::random(), + chain_info_v2: ChainInfoV2::random(), notif_protocols: vec![], rpc_protocols: vec![], version_string: None, diff --git a/rpc/api/src/types.rs b/rpc/api/src/types.rs index 121567d253..948e712904 100644 --- a/rpc/api/src/types.rs +++ b/rpc/api/src/types.rs @@ -1321,7 +1321,7 @@ impl From for PeerInfoView { fn from(info: PeerInfo) -> Self { Self { peer_id: info.peer_id, - chain_info: info.chain_info.into(), + chain_info: info.chain_info_v2.chain_info.into(), notif_protocols: info.notif_protocols.join(","), rpc_protocols: info.rpc_protocols.join(","), version_string: info.version_string, diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 60e6c93b6e..9feeb0f572 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -20,13 +20,13 @@ use flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage, SyncFlexiDagS use network_p2p_types::peer_id::PeerId; use num_enum::{IntoPrimitive, TryFromPrimitive}; use once_cell::sync::Lazy; -use starcoin_accumulator::{node::AccumulatorStoreType, AccumulatorTreeStore}; +use starcoin_accumulator::{node::AccumulatorStoreType, AccumulatorTreeStore, accumulator_info::AccumulatorInfo}; use starcoin_crypto::HashValue; use starcoin_state_store_api::{StateNode, StateNodeStore}; use starcoin_types::{ block::{Block, BlockBody, BlockHeader, BlockInfo}, contract_event::ContractEvent, - startup_info::{ChainInfo, ChainStatus, SnapshotRange, StartupInfo}, + startup_info::{ChainInfo, ChainStatus, SnapshotRange, StartupInfo, ChainInfoV2, DagChainStatus}, transaction::{RichTransactionInfo, Transaction}, }; //use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; @@ -198,6 +198,7 @@ impl StorageVersion { pub trait DagBlockStore { fn get_flexi_dag_startup_info(&self) -> Result>; fn save_flexi_dag_startup_info(&self, startup_info: StartupInfo) -> Result<()>; + fn get_dag_accumulator_info(&self) -> Result; } pub trait BlockStore { @@ -208,7 +209,7 @@ pub trait BlockStore { fn save_genesis(&self, genesis_hash: HashValue) -> Result<()>; - fn get_chain_info(&self) -> Result>; + fn get_chain_info(&self) -> Result>; fn get_block(&self, block_id: HashValue) -> Result>; @@ -391,6 +392,24 @@ impl DagBlockStore for Storage { self.chain_info_storage .save_flexi_dag_startup_info(startup_info) } + + fn get_dag_accumulator_info(&self) -> Result { + // initialize the block accumulator + let startup_info = match self.get_flexi_dag_startup_info()? 
{ + Some(startup_info) => startup_info, + None => bail!("failed to get dag startup info") + }; + + // let accmulator_info = sync_flexi_dag_store.get_snapshot_storage().get(startup_info.main); + let accumulator_info = match self.query_by_hash(startup_info.main) { + Ok(op_snapshot) => match op_snapshot { + Some(snapshot) => snapshot.accumulator_info, + None => bail!("failed to get sync accumulator info since it is None"), + }, + Err(error) => bail!("failed to get sync accumulator info: {}", error.to_string()), + }; + Ok(accumulator_info) + } } impl BlockStore for Storage { @@ -410,7 +429,7 @@ impl BlockStore for Storage { self.chain_info_storage.save_genesis(genesis_hash) } - fn get_chain_info(&self) -> Result> { + fn get_chain_info(&self) -> Result> { let genesis_hash = match self.get_genesis()? { Some(genesis_hash) => genesis_hash, None => return Ok(None), @@ -425,11 +444,19 @@ impl BlockStore for Storage { let head_block_info = self.get_block_info(head_block.id())?.ok_or_else(|| { format_err!("Startup block info {:?} should exist", startup_info.main) })?; - Ok(Some(ChainInfo::new( + + let flexi_dag_accumulator_info = self.get_dag_accumulator_info().unwrap_or(AccumulatorInfo::default()); + let chain_info = ChainInfo::new( head_block.chain_id(), genesis_hash, ChainStatus::new(head_block, head_block_info), - ))) + ); + Ok(Some(ChainInfoV2 { + chain_info, + dag_status: DagChainStatus { + flexi_dag_accumulator_info + } + })) } fn get_block(&self, block_id: HashValue) -> Result> { diff --git a/sync/src/sync.rs b/sync/src/sync.rs index dd4bb57f3c..c5f7bede14 100644 --- a/sync/src/sync.rs +++ b/sync/src/sync.rs @@ -217,7 +217,10 @@ impl SyncService { peer_selector.clone(), network.clone(), )); - if let Some(target) = + + if let Some(accumulator_info) = rpc_client.get_best_dag_target(current_block_info.get_total_difficulty())? { + todo!() + } else if let Some(target) = rpc_client.get_best_target(current_block_info.get_total_difficulty())? 
{ info!("[sync] Find target({}), total_difficulty:{}, current head({})'s total_difficulty({})", target.target_id.id(), target.block_info.total_difficulty, current_block_id, current_block_info.total_difficulty); @@ -520,6 +523,8 @@ impl CheckSyncEvent { impl EventHandler for SyncService { fn handle_event(&mut self, msg: CheckSyncEvent, ctx: &mut ServiceContext) { + // comment temporarily, for the dag branch, starcoin will sync dag only + // it will add some logic to determine which part to sync in the future if let Err(e) = self.check_and_start_sync(msg.peers, msg.skip_pow_verify, msg.strategy, ctx) { error!("[sync] Check sync error: {:?}", e); diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index 2d6521a0e7..a873d8cf96 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -5,11 +5,13 @@ use crate::tasks::block_sync_task::SyncBlockData; use crate::tasks::inner_sync_task::InnerSyncTask; use crate::verified_rpc_client::{RpcVerifyError, VerifiedRpcClient}; use anyhow::{format_err, Error, Result}; +use std::result::Result::Ok; use futures::channel::mpsc::UnboundedSender; use futures::future::BoxFuture; use futures::{FutureExt, TryFutureExt}; use network_api::{PeerId, PeerProvider, PeerSelector}; use network_p2p_core::{NetRpcError, RpcErrorCode}; +use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::MerkleAccumulator; use starcoin_chain::{BlockChain, ChainReader}; @@ -32,6 +34,24 @@ use stream_task::{ }; pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoFetcher { + fn get_best_dag_target(&self, min_difficulty: U256) -> Result> { + if let Some(best_peers) = self.peer_selector().bests(min_difficulty) { + // to do, here simply returns the accumulator info containing longest leaves + let result = match best_peers.into_iter().max_by_key(|peer_info| { + peer_info.chain_info_v2.dag_status.flexi_dag_accumulator_info.num_leaves + }) { + Some(peer_info) => Ok(Some(peer_info.chain_info_v2.dag_status.flexi_dag_accumulator_info)), + None => { + debug!("failed to find the best dag target"); + return Ok(None); + } + }; + return result; + } + debug!("failed to find the best dag target, since maybe no peers"); + return Ok(None); + } + fn get_best_target(&self, min_difficulty: U256) -> Result> { if let Some(best_peers) = self.peer_selector().bests(min_difficulty) { //TODO fast verify best peers by accumulator @@ -42,7 +62,7 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF let update = chain_statuses .iter_mut() .find(|(chain_status, _peers)| { - peer.chain_info().status() == chain_status + peer.chain_info().chain_info.status() == chain_status }) .map(|(_chain_status, peers)| { peers.push(peer.peer_id()); @@ -52,7 +72,7 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF if !update { chain_statuses - .push((peer.chain_info().status().clone(), vec![peer.peer_id()])) + .push((peer.chain_info().chain_info.status().clone(), vec![peer.peer_id()])) } chain_statuses }); @@ -117,7 +137,7 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF && best_target.peers.contains(&better_peer.peer_id()) { target = Some(( - better_peer.chain_info.status().info().clone(), + better_peer.chain_info_v2.chain_info.status().info().clone(), BlockIdAndNumber { number: better_peer.latest_header().number(), id: better_peer.latest_header().id(), @@ -134,7 +154,7 @@ pub trait SyncFetcher: PeerOperator + 
BlockIdFetcher + BlockFetcher + BlockInfoF let mut block_info = None; if block_id == better_peer.block_id() { block_info = - Some(better_peer.chain_info.status().info().clone()); + Some(better_peer.chain_info_v2.chain_info.status().info().clone()); } else if let Some(better_block_id) = self .fetch_block_id( Some(better_peer.peer_id()), diff --git a/sync/src/tasks/sync_dag_accumulator_task.rs b/sync/src/tasks/sync_dag_accumulator_task.rs index 9baedd3607..c27b06132c 100644 --- a/sync/src/tasks/sync_dag_accumulator_task.rs +++ b/sync/src/tasks/sync_dag_accumulator_task.rs @@ -1,4 +1,4 @@ -use anyhow::{bail, ensure, format_err, Result}; +use anyhow::{bail, ensure, Result}; use bcs_ext::BCSCodec; use futures::{future::BoxFuture, FutureExt}; use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; diff --git a/sync/src/tasks/sync_dag_block_task.rs b/sync/src/tasks/sync_dag_block_task.rs index 6ff0ec6801..4fbc5bcb03 100644 --- a/sync/src/tasks/sync_dag_block_task.rs +++ b/sync/src/tasks/sync_dag_block_task.rs @@ -1,8 +1,6 @@ -use anyhow::{bail, ensure, format_err, Ok, Result}; -use bcs_ext::BCSCodec; +use anyhow::{Ok, Result}; use futures::{future::BoxFuture, FutureExt}; -use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; -use starcoin_crypto::HashValue; +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, MerkleAccumulator}; use starcoin_network_rpc_api::dag_protocol::SyncDagBlockInfo; use starcoin_storage::{ flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage}, diff --git a/test-helper/src/network.rs b/test-helper/src/network.rs index 2e5faea961..82ec0c3a7c 100644 --- a/test-helper/src/network.rs +++ b/test-helper/src/network.rs @@ -16,8 +16,8 @@ use starcoin_service_registry::{ RegistryAsyncService, RegistryService, ServiceContext, ServiceFactory, ServiceRef, }; use starcoin_storage::block_info::BlockInfoStore; -use starcoin_storage::{BlockStore, Storage}; -use starcoin_types::startup_info::{ChainInfo, ChainStatus}; +use starcoin_storage::{BlockStore, Storage, DagBlockStore}; +use starcoin_types::startup_info::{ChainInfo, ChainStatus, DagChainStatus, ChainInfoV2}; use std::any::Any; use std::borrow::Cow; use std::sync::{Arc, Mutex}; @@ -195,10 +195,14 @@ impl ServiceFactory for MockNetworkServiceFactory { .get_block_info(head_block_hash)? .ok_or_else(|| format_err!("can't get block info by hash {}", head_block_hash))?; let chain_status = ChainStatus::new(head_block_header, head_block_info); - let chain_info = - ChainInfo::new(config.net().chain_id(), genesis_hash, chain_status.clone()); + let chain_info_v2 = ChainInfoV2 { + chain_info: ChainInfo::new(config.net().chain_id(), genesis_hash, chain_status.clone()), + dag_status: DagChainStatus { + flexi_dag_accumulator_info: storage.get_dag_accumulator_info()?, + } + }; let actor_service = - NetworkActorService::new(config, chain_info, rpc, peer_message_handle.clone())?; + NetworkActorService::new(config, chain_info_v2, rpc, peer_message_handle.clone())?; let network_service = actor_service.network_service(); let network_async_service = NetworkServiceRef::new(network_service, ctx.self_ref()); // set self sync status to synced for test. 
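
To make the new selection rule concrete, below is a small, self-contained sketch of the policy `get_best_dag_target` introduces: among peers whose total difficulty exceeds the local one, take the flexi-dag accumulator that covers the most leaves. The `Peer` and `AccumulatorInfo` types here are simplified stand-ins for illustration only, not the real starcoin types, and the difficulty filter mirrors what `peer_selector().bests(min_difficulty)` is assumed to do.

```rust
#[derive(Clone, Debug, PartialEq)]
struct AccumulatorInfo {
    num_leaves: u64,
}

#[derive(Clone, Debug)]
struct Peer {
    total_difficulty: u64,
    dag_accumulator: AccumulatorInfo,
}

/// Pick the DAG sync target: the accumulator with the most leaves among
/// peers that are ahead of `min_difficulty`. Returns None if no peer qualifies.
fn best_dag_target(peers: &[Peer], min_difficulty: u64) -> Option<AccumulatorInfo> {
    peers
        .iter()
        .filter(|p| p.total_difficulty > min_difficulty)
        .max_by_key(|p| p.dag_accumulator.num_leaves)
        .map(|p| p.dag_accumulator.clone())
}

fn main() {
    let peers = vec![
        Peer { total_difficulty: 120, dag_accumulator: AccumulatorInfo { num_leaves: 10 } },
        Peer { total_difficulty: 150, dag_accumulator: AccumulatorInfo { num_leaves: 42 } },
        // behind on difficulty, ignored even though it reports more leaves
        Peer { total_difficulty: 90, dag_accumulator: AccumulatorInfo { num_leaves: 99 } },
    ];
    assert_eq!(best_dag_target(&peers, 100), Some(AccumulatorInfo { num_leaves: 42 }));
}
```
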
diff --git a/types/src/startup_info.rs b/types/src/startup_info.rs index d536020128..73652152a1 100644 --- a/types/src/startup_info.rs +++ b/types/src/startup_info.rs @@ -13,6 +13,38 @@ use starcoin_vm_types::genesis_config::ChainId; use std::convert::{TryFrom, TryInto}; use std::fmt; use std::fmt::Formatter; + +#[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)] +pub struct ChainInfoV2 { + pub chain_info: ChainInfo, + pub dag_status: DagChainStatus, +} + +impl ChainInfoV2 { + pub fn new(chain_id: ChainId, genesis_hash: HashValue, status: ChainStatus) -> Self { + ChainInfoV2 { + chain_info: ChainInfo::new(chain_id, genesis_hash, status), + dag_status: DagChainStatus { + flexi_dag_accumulator_info: AccumulatorInfo::default(), // dag todo + }, + } + } + pub fn random() -> Self { + Self { + chain_info: ChainInfo::random(), + dag_status: DagChainStatus::random(), + } + } +} +impl std::fmt::Display for ChainInfoV2 { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + serde_json::to_string(self).map_err(|_| std::fmt::Error)? + ) + } +} /// The info of a chain. #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)] pub struct ChainInfo { @@ -119,6 +151,7 @@ impl ChainStatus { rand::random::(), ), ); + Self { head, info: block_info, @@ -151,6 +184,36 @@ impl Sample for ChainStatus { } } +#[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)] +pub struct DagChainStatus { + pub flexi_dag_accumulator_info: AccumulatorInfo, +} + +impl DagChainStatus { + pub fn new(flexi_dag_accumulator_info: AccumulatorInfo) -> Self { + Self { + flexi_dag_accumulator_info, + } + } + + pub fn random() -> Self { + let head = BlockHeader::random(); + Self { + flexi_dag_accumulator_info: AccumulatorInfo::new( + head.block_accumulator_root(), + vec![], + rand::random::(), + rand::random::(), + )} + } + + pub fn sample() -> Self { + Self { + flexi_dag_accumulator_info: AccumulatorInfo::sample(), + } + } +} + #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)] pub struct StartupInfo { /// main chain head block hash From 747aafa0aedff7c5fa5a668d980c72e3a0adcd1d Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 28 Jul 2023 10:22:20 +0800 Subject: [PATCH 23/30] use chain state info instead of chain info v2 --- cmd/peer-watcher/src/lib.rs | 6 +++--- cmd/peer-watcher/src/main.rs | 8 ++++---- genesis/src/lib.rs | 8 ++++---- network/api/src/tests.rs | 10 +++++----- network/src/network_p2p_handle.rs | 8 ++++---- network/src/service.rs | 18 +++++++++--------- network/src/worker.rs | 4 ++-- network/types/src/peer_info.rs | 24 ++++++++++++------------ rpc/api/src/types.rs | 2 +- storage/src/lib.rs | 8 ++++---- sync/src/tasks/mod.rs | 8 ++++---- test-helper/src/network.rs | 6 +++--- types/src/startup_info.rs | 8 ++++---- 13 files changed, 59 insertions(+), 59 deletions(-) diff --git a/cmd/peer-watcher/src/lib.rs b/cmd/peer-watcher/src/lib.rs index 649845b6da..5c79b158e0 100644 --- a/cmd/peer-watcher/src/lib.rs +++ b/cmd/peer-watcher/src/lib.rs @@ -9,7 +9,7 @@ use starcoin_network::network_p2p_handle::Networkp2pHandle; use starcoin_network::{build_network_worker, NotificationMessage}; use starcoin_storage::storage::StorageInstance; use starcoin_storage::Storage; -use starcoin_types::startup_info::ChainInfoV2; +use starcoin_types::startup_info::ChainStateInfo; use std::sync::Arc; pub fn build_lighting_network( @@ -19,10 +19,10 @@ pub fn build_lighting_network( let genesis = starcoin_genesis::Genesis::load_or_build(net)?; let storage = 
Arc::new(Storage::new(StorageInstance::new_cache_instance())?); let chain_info = genesis.execute_genesis_block(net, storage)?; - let chain_info_v2 = ChainInfoV2::new(chain_info.chain_id(), chain_info.genesis_hash(), chain_info.status().clone()); + let chain_state_info = ChainStateInfo::new(chain_info.chain_id(), chain_info.genesis_hash(), chain_info.status().clone()); build_network_worker( network_config, - chain_info_v2, + chain_state_info, NotificationMessage::protocols(), None, None, diff --git a/cmd/peer-watcher/src/main.rs b/cmd/peer-watcher/src/main.rs index fd2fc5b639..b537c8975c 100644 --- a/cmd/peer-watcher/src/main.rs +++ b/cmd/peer-watcher/src/main.rs @@ -8,7 +8,7 @@ use network_p2p::Event; use network_types::peer_info::PeerInfo; use starcoin_config::{NodeConfig, StarcoinOpt}; use starcoin_peer_watcher::build_lighting_network; -use starcoin_types::startup_info::ChainInfoV2; +use starcoin_types::startup_info::ChainStateInfo; /// A lighting node, connect to peer to peer network, and monitor peers. fn main() { @@ -31,10 +31,10 @@ fn main() { notif_protocols, rpc_protocols, version_string, - } => match ChainInfoV2::decode(&generic_data) { - Ok(chain_info_v2) => Some(PeerInfo::new( + } => match ChainStateInfo::decode(&generic_data) { + Ok(chain_state_info) => Some(PeerInfo::new( remote.into(), - chain_info_v2, + chain_state_info, notif_protocols, rpc_protocols, version_string, diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index 6177a8ebc9..5dff7f4ee2 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -308,13 +308,13 @@ impl Genesis { ) -> Result<(ChainInfo, Genesis)> { debug!("load startup_info."); let (chain_info, genesis) = match storage.get_chain_info() { - Ok(Some(chain_info_v2)) => { - debug!("Get chain info {:?} from db", chain_info_v2); + Ok(Some(chain_state_info)) => { + debug!("Get chain info {:?} from db", chain_state_info); info!("Check genesis file."); let genesis = Self::load_and_check_genesis(net, data_dir, false)?; match storage.get_block(genesis.block().header().id()) { Ok(Some(block)) => { - if *genesis.block() == block && chain_info_v2.chain_info.genesis_hash() == block.id() { + if *genesis.block() == block && chain_state_info.chain_info.genesis_hash() == block.id() { info!("Check genesis db block ok!"); } else { return Err(GenesisError::GenesisVersionMismatch { @@ -329,7 +329,7 @@ impl Genesis { } Err(e) => return Err(GenesisError::GenesisLoadFailure(e).into()), } - (chain_info_v2.chain_info, genesis) + (chain_state_info.chain_info, genesis) } Ok(None) => { let genesis = Self::load_and_check_genesis(net, data_dir, true)?; diff --git a/network/api/src/tests.rs b/network/api/src/tests.rs index 423653b45c..bb41fcc06b 100644 --- a/network/api/src/tests.rs +++ b/network/api/src/tests.rs @@ -7,7 +7,7 @@ use network_p2p_types::peer_id::PeerId; use network_types::peer_info::PeerInfo; use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; -use starcoin_types::startup_info::{ChainInfoV2, ChainStatus}; +use starcoin_types::startup_info::{ChainStateInfo, ChainStatus}; use starcoin_types::U256; #[test] @@ -34,28 +34,28 @@ fn test_peer_selector() { let peers = vec![ PeerInfo::new( PeerId::random(), - ChainInfoV2::new(1.into(), HashValue::zero(), mock_chain_status(100.into())), + ChainStateInfo::new(1.into(), HashValue::zero(), mock_chain_status(100.into())), vec![], vec![], None, ), PeerInfo::new( PeerId::random(), - ChainInfoV2::new(1.into(), HashValue::zero(), mock_chain_status(99.into())), + ChainStateInfo::new(1.into(), HashValue::zero(), 
mock_chain_status(99.into())), vec![], vec![], None, ), PeerInfo::new( PeerId::random(), - ChainInfoV2::new(1.into(), HashValue::zero(), mock_chain_status(100.into())), + ChainStateInfo::new(1.into(), HashValue::zero(), mock_chain_status(100.into())), vec![], vec![], None, ), PeerInfo::new( PeerId::random(), - ChainInfoV2::new(1.into(), HashValue::zero(), mock_chain_status(1.into())), + ChainStateInfo::new(1.into(), HashValue::zero(), mock_chain_status(1.into())), vec![], vec![], None, diff --git a/network/src/network_p2p_handle.rs b/network/src/network_p2p_handle.rs index 197aea58c1..9b579765a8 100644 --- a/network/src/network_p2p_handle.rs +++ b/network/src/network_p2p_handle.rs @@ -10,7 +10,7 @@ use network_p2p::business_layer_handle::HandshakeResult; use network_p2p::{business_layer_handle::BusinessLayerHandle, protocol::rep, PeerId}; use sc_peerset::ReputationChange; use serde::{Deserialize, Serialize}; -use starcoin_types::startup_info::{ChainInfoV2, ChainStatus}; +use starcoin_types::startup_info::{ChainStateInfo, ChainStatus}; /// Current protocol version. pub(crate) const CURRENT_VERSION: u32 = 6; @@ -29,7 +29,7 @@ pub struct Status { /// Tell other peer which rpc api we support. pub rpc_protocols: Vec>, /// the generic data related to the peer - pub info: ChainInfoV2, + pub info: ChainStateInfo, } pub struct Networkp2pHandle { @@ -37,7 +37,7 @@ pub struct Networkp2pHandle { } impl Networkp2pHandle { - pub fn new(chain_info: ChainInfoV2) -> Self { + pub fn new(chain_info: ChainStateInfo) -> Self { let status = Status { version: CURRENT_VERSION, min_supported_version: MIN_VERSION, @@ -108,7 +108,7 @@ impl BusinessLayerHandle for Networkp2pHandle { } fn update_generic_data(&mut self, peer_info: &[u8]) -> Result<(), anyhow::Error> { - match ChainInfoV2::decode(peer_info) { + match ChainStateInfo::decode(peer_info) { std::result::Result::Ok(other_chain_info) => { self.status.info = other_chain_info; Ok(()) diff --git a/network/src/service.rs b/network/src/service.rs index 3d4519a2d6..b8a614ebfc 100644 --- a/network/src/service.rs +++ b/network/src/service.rs @@ -30,7 +30,7 @@ use starcoin_service_registry::{ ActorService, EventHandler, ServiceContext, ServiceHandler, ServiceRef, ServiceRequest, }; use starcoin_txpool_api::PropagateTransactions; -use starcoin_types::startup_info::{ChainInfoV2, ChainStatus}; +use starcoin_types::startup_info::{ChainStateInfo, ChainStatus}; use starcoin_types::sync_status::SyncStatus; use starcoin_types::system_events::SyncStatusChangeEvent; use std::borrow::Cow; @@ -56,7 +56,7 @@ impl NetworkActor for NetworkActorService {} impl NetworkActorService { pub fn new( config: Arc, - chain_info_v2: ChainInfoV2, + chain_state_info: ChainStateInfo, rpc: Option<(RpcInfo, ServiceRef)>, peer_message_handler: H, ) -> Result @@ -65,7 +65,7 @@ impl NetworkActorService { { let (self_info, worker) = build_network_worker( &config.network, - chain_info_v2, + chain_state_info, config.network.supported_network_protocols(), rpc, config.metrics.registry().cloned(), @@ -186,7 +186,7 @@ impl EventHandler for NetworkActorService { "Connected peer {:?}, protocol: {}, notif_protocols: {:?}, rpc_protocols: {:?}", remote, protocol, notif_protocols, rpc_protocols ); - let info = match ChainInfoV2::decode(&generic_data) { + let info = match ChainStateInfo::decode(&generic_data) { Ok(data) => data, Err(_) => return, }; @@ -626,7 +626,7 @@ impl Inner { pub(crate) fn on_peer_connected( &mut self, peer_id: PeerId, - chain_info_v2: ChainInfoV2, + chain_state_info: ChainStateInfo, 
notif_protocols: Vec>, rpc_protocols: Vec>, version_string: Option, @@ -637,17 +637,17 @@ impl Inner { // avoid update chain status to old // this many happend when multi protocol send repeat handhake. //FIXME after PeerEvent refactor. - if chain_info_v2.chain_info.total_difficulty() - > peer.peer_info.chain_info_v2.chain_info.status().info.total_difficulty + if chain_state_info.chain_info.total_difficulty() + > peer.peer_info.chain_state_info.chain_info.status().info.total_difficulty { peer.peer_info - .update_chain_status(chain_info_v2.chain_info.status().clone()); + .update_chain_status(chain_state_info.chain_info.status().clone()); } }) .or_insert_with(|| { Peer::new(PeerInfo::new( peer_id, - chain_info_v2, + chain_state_info, notif_protocols, rpc_protocols, version_string, diff --git a/network/src/worker.rs b/network/src/worker.rs index 529da91e95..d9d76e30cf 100644 --- a/network/src/worker.rs +++ b/network/src/worker.rs @@ -16,7 +16,7 @@ use starcoin_config::NetworkConfig; use starcoin_metrics::Registry; use starcoin_network_rpc::NetworkRpcService; use starcoin_service_registry::ServiceRef; -use starcoin_types::startup_info::ChainInfoV2; +use starcoin_types::startup_info::ChainStateInfo; use std::borrow::Cow; use crate::network_p2p_handle::Networkp2pHandle; @@ -28,7 +28,7 @@ pub const RPC_PROTOCOL_PREFIX: &str = RpcInfo::RPC_PROTOCOL_PREFIX; pub fn build_network_worker( network_config: &NetworkConfig, - chain_info: ChainInfoV2, + chain_info: ChainStateInfo, protocols: Vec>, rpc_service: Option<(RpcInfo, ServiceRef)>, metrics_registry: Option, diff --git a/network/types/src/peer_info.rs b/network/types/src/peer_info.rs index 8e591a70e0..f886b35c96 100644 --- a/network/types/src/peer_info.rs +++ b/network/types/src/peer_info.rs @@ -9,14 +9,14 @@ use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue; use starcoin_types::block::BlockHeader; use starcoin_types::block::BlockNumber; -use starcoin_types::startup_info::{ChainInfoV2, ChainStatus}; +use starcoin_types::startup_info::{ChainStateInfo, ChainStatus}; use starcoin_types::U256; use std::borrow::Cow; #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)] pub struct PeerInfo { pub peer_id: PeerId, - pub chain_info_v2: ChainInfoV2, + pub chain_state_info: ChainStateInfo, pub notif_protocols: Vec>, pub rpc_protocols: Vec>, pub version_string: Option, @@ -25,14 +25,14 @@ pub struct PeerInfo { impl PeerInfo { pub fn new( peer_id: PeerId, - chain_info_v2: ChainInfoV2, + chain_state_info: ChainStateInfo, notif_protocols: Vec>, rpc_protocols: Vec>, version_string: Option, ) -> Self { Self { peer_id, - chain_info_v2, + chain_state_info, notif_protocols, rpc_protocols, version_string, @@ -43,28 +43,28 @@ impl PeerInfo { self.peer_id.clone() } - pub fn chain_info(&self) -> &ChainInfoV2 { - &self.chain_info_v2 + pub fn chain_info(&self) -> &ChainStateInfo { + &self.chain_state_info } pub fn block_number(&self) -> BlockNumber { - self.chain_info_v2.chain_info.head().number() + self.chain_state_info.chain_info.head().number() } pub fn latest_header(&self) -> &BlockHeader { - self.chain_info_v2.chain_info.head() + self.chain_state_info.chain_info.head() } pub fn block_id(&self) -> HashValue { - self.chain_info_v2.chain_info.head().id() + self.chain_state_info.chain_info.head().id() } pub fn total_difficulty(&self) -> U256 { - self.chain_info_v2.chain_info.total_difficulty() + self.chain_state_info.chain_info.total_difficulty() } pub fn update_chain_status(&mut self, chain_status: ChainStatus) { - 
self.chain_info_v2.chain_info.update_status(chain_status) + self.chain_state_info.chain_info.update_status(chain_status) } /// This peer is support notification @@ -96,7 +96,7 @@ impl PeerInfo { pub fn random() -> Self { Self { peer_id: PeerId::random(), - chain_info_v2: ChainInfoV2::random(), + chain_state_info: ChainStateInfo::random(), notif_protocols: vec![], rpc_protocols: vec![], version_string: None, diff --git a/rpc/api/src/types.rs b/rpc/api/src/types.rs index 948e712904..b8ff1a24c0 100644 --- a/rpc/api/src/types.rs +++ b/rpc/api/src/types.rs @@ -1321,7 +1321,7 @@ impl From for PeerInfoView { fn from(info: PeerInfo) -> Self { Self { peer_id: info.peer_id, - chain_info: info.chain_info_v2.chain_info.into(), + chain_info: info.chain_state_info.chain_info.into(), notif_protocols: info.notif_protocols.join(","), rpc_protocols: info.rpc_protocols.join(","), version_string: info.version_string, diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 9feeb0f572..2fbd858e8c 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -26,7 +26,7 @@ use starcoin_state_store_api::{StateNode, StateNodeStore}; use starcoin_types::{ block::{Block, BlockBody, BlockHeader, BlockInfo}, contract_event::ContractEvent, - startup_info::{ChainInfo, ChainStatus, SnapshotRange, StartupInfo, ChainInfoV2, DagChainStatus}, + startup_info::{ChainInfo, ChainStatus, SnapshotRange, StartupInfo, ChainStateInfo, DagChainStatus}, transaction::{RichTransactionInfo, Transaction}, }; //use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; @@ -209,7 +209,7 @@ pub trait BlockStore { fn save_genesis(&self, genesis_hash: HashValue) -> Result<()>; - fn get_chain_info(&self) -> Result>; + fn get_chain_info(&self) -> Result>; fn get_block(&self, block_id: HashValue) -> Result>; @@ -429,7 +429,7 @@ impl BlockStore for Storage { self.chain_info_storage.save_genesis(genesis_hash) } - fn get_chain_info(&self) -> Result> { + fn get_chain_info(&self) -> Result> { let genesis_hash = match self.get_genesis()? 
{ Some(genesis_hash) => genesis_hash, None => return Ok(None), @@ -451,7 +451,7 @@ impl BlockStore for Storage { genesis_hash, ChainStatus::new(head_block, head_block_info), ); - Ok(Some(ChainInfoV2 { + Ok(Some(ChainStateInfo { chain_info, dag_status: DagChainStatus { flexi_dag_accumulator_info diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index a873d8cf96..af0b1ac60b 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -38,9 +38,9 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF if let Some(best_peers) = self.peer_selector().bests(min_difficulty) { // to do, here simply returns the accumulator info containing longest leaves let result = match best_peers.into_iter().max_by_key(|peer_info| { - peer_info.chain_info_v2.dag_status.flexi_dag_accumulator_info.num_leaves + peer_info.chain_state_info.dag_status.flexi_dag_accumulator_info.num_leaves }) { - Some(peer_info) => Ok(Some(peer_info.chain_info_v2.dag_status.flexi_dag_accumulator_info)), + Some(peer_info) => Ok(Some(peer_info.chain_state_info.dag_status.flexi_dag_accumulator_info)), None => { debug!("failed to find the best dag target"); return Ok(None); @@ -137,7 +137,7 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF && best_target.peers.contains(&better_peer.peer_id()) { target = Some(( - better_peer.chain_info_v2.chain_info.status().info().clone(), + better_peer.chain_state_info.chain_info.status().info().clone(), BlockIdAndNumber { number: better_peer.latest_header().number(), id: better_peer.latest_header().id(), @@ -154,7 +154,7 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF let mut block_info = None; if block_id == better_peer.block_id() { block_info = - Some(better_peer.chain_info_v2.chain_info.status().info().clone()); + Some(better_peer.chain_state_info.chain_info.status().info().clone()); } else if let Some(better_block_id) = self .fetch_block_id( Some(better_peer.peer_id()), diff --git a/test-helper/src/network.rs b/test-helper/src/network.rs index 82ec0c3a7c..2b3ea61fb4 100644 --- a/test-helper/src/network.rs +++ b/test-helper/src/network.rs @@ -17,7 +17,7 @@ use starcoin_service_registry::{ }; use starcoin_storage::block_info::BlockInfoStore; use starcoin_storage::{BlockStore, Storage, DagBlockStore}; -use starcoin_types::startup_info::{ChainInfo, ChainStatus, DagChainStatus, ChainInfoV2}; +use starcoin_types::startup_info::{ChainInfo, ChainStatus, DagChainStatus, ChainStateInfo}; use std::any::Any; use std::borrow::Cow; use std::sync::{Arc, Mutex}; @@ -195,14 +195,14 @@ impl ServiceFactory for MockNetworkServiceFactory { .get_block_info(head_block_hash)? .ok_or_else(|| format_err!("can't get block info by hash {}", head_block_hash))?; let chain_status = ChainStatus::new(head_block_header, head_block_info); - let chain_info_v2 = ChainInfoV2 { + let chain_state_info = ChainStateInfo { chain_info: ChainInfo::new(config.net().chain_id(), genesis_hash, chain_status.clone()), dag_status: DagChainStatus { flexi_dag_accumulator_info: storage.get_dag_accumulator_info()?, } }; let actor_service = - NetworkActorService::new(config, chain_info_v2, rpc, peer_message_handle.clone())?; + NetworkActorService::new(config, chain_state_info, rpc, peer_message_handle.clone())?; let network_service = actor_service.network_service(); let network_async_service = NetworkServiceRef::new(network_service, ctx.self_ref()); // set self sync status to synced for test. 
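
A note on the handshake path touched above: the peer's generic handshake payload is decoded into the renamed type, and a repeated handshake only replaces the stored status when it reports a strictly higher total difficulty, so stale handshakes cannot roll a peer back. The sketch below illustrates that update rule with simplified stand-in types; it is an assumption-labelled illustration, not the actual starcoin implementation.

```rust
#[derive(Clone, Debug)]
struct ChainStatusLite {
    total_difficulty: u64,
}

struct PeerRecord {
    status: ChainStatusLite,
}

impl PeerRecord {
    /// Apply a (possibly repeated) handshake: only a strictly higher total
    /// difficulty overwrites the stored status; older or equal values are ignored.
    fn on_handshake(&mut self, incoming: ChainStatusLite) {
        if incoming.total_difficulty > self.status.total_difficulty {
            self.status = incoming;
        }
    }
}

fn main() {
    let mut peer = PeerRecord {
        status: ChainStatusLite { total_difficulty: 100 },
    };
    peer.on_handshake(ChainStatusLite { total_difficulty: 90 }); // stale, ignored
    assert_eq!(peer.status.total_difficulty, 100);
    peer.on_handshake(ChainStatusLite { total_difficulty: 150 }); // newer, applied
    assert_eq!(peer.status.total_difficulty, 150);
}
```
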
diff --git a/types/src/startup_info.rs b/types/src/startup_info.rs index 73652152a1..fe57b026d8 100644 --- a/types/src/startup_info.rs +++ b/types/src/startup_info.rs @@ -15,14 +15,14 @@ use std::fmt; use std::fmt::Formatter; #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)] -pub struct ChainInfoV2 { +pub struct ChainStateInfo { pub chain_info: ChainInfo, pub dag_status: DagChainStatus, } -impl ChainInfoV2 { +impl ChainStateInfo { pub fn new(chain_id: ChainId, genesis_hash: HashValue, status: ChainStatus) -> Self { - ChainInfoV2 { + ChainStateInfo { chain_info: ChainInfo::new(chain_id, genesis_hash, status), dag_status: DagChainStatus { flexi_dag_accumulator_info: AccumulatorInfo::default(), // dag todo @@ -36,7 +36,7 @@ impl ChainInfoV2 { } } } -impl std::fmt::Display for ChainInfoV2 { +impl std::fmt::Display for ChainStateInfo { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!( f, From 4eec0db0b4350e1a571e8d6c195788b1242a69b8 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 28 Jul 2023 16:17:07 +0800 Subject: [PATCH 24/30] add sync dag ancestor --- cmd/peer-watcher/src/lib.rs | 6 +- genesis/src/lib.rs | 7 +- network/src/service.rs | 8 ++- network/src/worker.rs | 3 +- storage/src/lib.rs | 22 +++--- sync/src/sync.rs | 31 +++++++-- sync/src/tasks/mod.rs | 40 ++++++++--- sync/src/tasks/sync_dag_block_task.rs | 6 +- sync/src/tasks/sync_dag_full_task.rs | 85 +++++++++++++++++++++++ sync/src/tasks/sync_find_ancestor_task.rs | 23 +++--- test-helper/src/network.rs | 6 +- types/src/startup_info.rs | 5 +- 12 files changed, 197 insertions(+), 45 deletions(-) create mode 100644 sync/src/tasks/sync_dag_full_task.rs diff --git a/cmd/peer-watcher/src/lib.rs b/cmd/peer-watcher/src/lib.rs index 5c79b158e0..3d3d3f337f 100644 --- a/cmd/peer-watcher/src/lib.rs +++ b/cmd/peer-watcher/src/lib.rs @@ -19,7 +19,11 @@ pub fn build_lighting_network( let genesis = starcoin_genesis::Genesis::load_or_build(net)?; let storage = Arc::new(Storage::new(StorageInstance::new_cache_instance())?); let chain_info = genesis.execute_genesis_block(net, storage)?; - let chain_state_info = ChainStateInfo::new(chain_info.chain_id(), chain_info.genesis_hash(), chain_info.status().clone()); + let chain_state_info = ChainStateInfo::new( + chain_info.chain_id(), + chain_info.genesis_hash(), + chain_info.status().clone(), + ); build_network_worker( network_config, chain_state_info, diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index 5dff7f4ee2..5f6f5c2d5c 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -255,7 +255,8 @@ impl Genesis { storage.save_startup_info(startup_info)?; Ok(storage .get_chain_info()? - .ok_or_else(|| format_err!("ChainInfo should exist after genesis block executed."))?.chain_info) + .ok_or_else(|| format_err!("ChainInfo should exist after genesis block executed."))? + .chain_info) } pub fn save
<P: AsRef<Path>>
(&self, data_dir: P) -> Result<()> @@ -314,7 +315,9 @@ impl Genesis { let genesis = Self::load_and_check_genesis(net, data_dir, false)?; match storage.get_block(genesis.block().header().id()) { Ok(Some(block)) => { - if *genesis.block() == block && chain_state_info.chain_info.genesis_hash() == block.id() { + if *genesis.block() == block + && chain_state_info.chain_info.genesis_hash() == block.id() + { info!("Check genesis db block ok!"); } else { return Err(GenesisError::GenesisVersionMismatch { diff --git a/network/src/service.rs b/network/src/service.rs index b8a614ebfc..eb898e4043 100644 --- a/network/src/service.rs +++ b/network/src/service.rs @@ -638,7 +638,13 @@ impl Inner { // this many happend when multi protocol send repeat handhake. //FIXME after PeerEvent refactor. if chain_state_info.chain_info.total_difficulty() - > peer.peer_info.chain_state_info.chain_info.status().info.total_difficulty + > peer + .peer_info + .chain_state_info + .chain_info + .status() + .info + .total_difficulty { peer.peer_info .update_chain_status(chain_state_info.chain_info.status().clone()); diff --git a/network/src/worker.rs b/network/src/worker.rs index d9d76e30cf..be75548864 100644 --- a/network/src/worker.rs +++ b/network/src/worker.rs @@ -107,7 +107,8 @@ pub fn build_network_worker( ..NetworkConfiguration::default() }; // protocol id is chain/{chain_id}, `RegisteredProtocol` will append `/starcoin` prefix - let protocol_id = ProtocolId::from(format!("chain/{}", chain_info.chain_info.chain_id()).as_str()); + let protocol_id = + ProtocolId::from(format!("chain/{}", chain_info.chain_info.chain_id()).as_str()); debug!("Init network worker with config: {:?}", config); let worker: NetworkWorker = NetworkWorker::new(Params::new( diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 2fbd858e8c..4633bc6110 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -20,13 +20,17 @@ use flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage, SyncFlexiDagS use network_p2p_types::peer_id::PeerId; use num_enum::{IntoPrimitive, TryFromPrimitive}; use once_cell::sync::Lazy; -use starcoin_accumulator::{node::AccumulatorStoreType, AccumulatorTreeStore, accumulator_info::AccumulatorInfo}; +use starcoin_accumulator::{ + accumulator_info::AccumulatorInfo, node::AccumulatorStoreType, AccumulatorTreeStore, +}; use starcoin_crypto::HashValue; use starcoin_state_store_api::{StateNode, StateNodeStore}; use starcoin_types::{ block::{Block, BlockBody, BlockHeader, BlockInfo}, contract_event::ContractEvent, - startup_info::{ChainInfo, ChainStatus, SnapshotRange, StartupInfo, ChainStateInfo, DagChainStatus}, + startup_info::{ + ChainInfo, ChainStateInfo, ChainStatus, DagChainStatus, SnapshotRange, StartupInfo, + }, transaction::{RichTransactionInfo, Transaction}, }; //use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; @@ -397,7 +401,7 @@ impl DagBlockStore for Storage { // initialize the block accumulator let startup_info = match self.get_flexi_dag_startup_info()? 
{ Some(startup_info) => startup_info, - None => bail!("failed to get dag startup info") + None => bail!("failed to get dag startup info"), }; // let accmulator_info = sync_flexi_dag_store.get_snapshot_storage().get(startup_info.main); @@ -445,17 +449,19 @@ impl BlockStore for Storage { format_err!("Startup block info {:?} should exist", startup_info.main) })?; - let flexi_dag_accumulator_info = self.get_dag_accumulator_info().unwrap_or(AccumulatorInfo::default()); + let flexi_dag_accumulator_info = self + .get_dag_accumulator_info() + .unwrap_or(AccumulatorInfo::default()); let chain_info = ChainInfo::new( head_block.chain_id(), genesis_hash, ChainStatus::new(head_block, head_block_info), ); - Ok(Some(ChainStateInfo { - chain_info, + Ok(Some(ChainStateInfo { + chain_info, dag_status: DagChainStatus { - flexi_dag_accumulator_info - } + flexi_dag_accumulator_info, + }, })) } diff --git a/sync/src/sync.rs b/sync/src/sync.rs index c5f7bede14..3e2e56eed1 100644 --- a/sync/src/sync.rs +++ b/sync/src/sync.rs @@ -3,13 +3,14 @@ use crate::block_connector::BlockConnectorService; use crate::sync_metrics::SyncMetrics; -use crate::tasks::{full_sync_task, AncestorEvent, SyncFetcher}; +use crate::tasks::{full_sync_task, AncestorEvent, SyncFetcher, sync_dag_full_task}; use crate::verified_rpc_client::{RpcVerifyError, VerifiedRpcClient}; use anyhow::{format_err, Result}; use futures::FutureExt; use futures_timer::Delay; use network_api::peer_score::PeerScoreMetrics; use network_api::{PeerId, PeerProvider, PeerSelector, PeerStrategy, ReputationChange}; +use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_chain::BlockChain; use starcoin_chain_api::ChainReader; use starcoin_config::NodeConfig; @@ -21,7 +22,7 @@ use starcoin_service_registry::{ ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, }; use starcoin_storage::block_info::BlockInfoStore; -use starcoin_storage::{BlockStore, Storage}; +use starcoin_storage::{BlockStore, DagBlockStore, Storage, Store, SyncFlexiDagStore}; use starcoin_sync_api::{ PeerScoreRequest, PeerScoreResponse, SyncCancelRequest, SyncProgressReport, SyncProgressRequest, SyncServiceHandler, SyncStartRequest, SyncStatusRequest, SyncTarget, @@ -33,6 +34,7 @@ use starcoin_types::system_events::{NewHeadBlock, SyncStatusChangeEvent, SystemS use std::sync::Arc; use std::time::Duration; use stream_task::{TaskError, TaskEventCounterHandle, TaskHandle}; +use std::result::Result::Ok; const REPUTATION_THRESHOLD: i32 = -1000; @@ -149,6 +151,16 @@ impl SyncService { let peer_score_metrics = self.peer_score_metrics.clone(); let sync_metrics = self.metrics.clone(); let vm_metrics = self.vm_metrics.clone(); + + let accumulator_store = ctx + .get_shared::>() + .expect("storage must exist") + .get_accumulator_store(AccumulatorStoreType::SyncDag); + let accumulator_snapshot = ctx + .get_shared::>() + .expect("storage must exist") + .get_accumulator_snapshot_storage(); + let fut = async move { let peer_select_strategy = peer_strategy.unwrap_or_else(|| config.sync.peer_select_strategy()); @@ -218,8 +230,19 @@ impl SyncService { network.clone(), )); - if let Some(accumulator_info) = rpc_client.get_best_dag_target(current_block_info.get_total_difficulty())? { - todo!() + // for testing, we start dag sync directly + if let Some(target_accumulator_info) = + rpc_client.get_best_dag_target(current_block_info.get_total_difficulty())? 
+ { + let local_dag_accumulator_info = storage.get_dag_accumulator_info()?; + sync_dag_full_task( + local_dag_accumulator_info, + target_accumulator_info, + rpc_client.clone(), + accumulator_store, + accumulator_snapshot, + ); + Ok(None) } else if let Some(target) = rpc_client.get_best_target(current_block_info.get_total_difficulty())? { diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index af0b1ac60b..439c414985 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -5,7 +5,6 @@ use crate::tasks::block_sync_task::SyncBlockData; use crate::tasks::inner_sync_task::InnerSyncTask; use crate::verified_rpc_client::{RpcVerifyError, VerifiedRpcClient}; use anyhow::{format_err, Error, Result}; -use std::result::Result::Ok; use futures::channel::mpsc::UnboundedSender; use futures::future::BoxFuture; use futures::{FutureExt, TryFutureExt}; @@ -24,6 +23,7 @@ use starcoin_time_service::TimeService; use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; use starcoin_types::startup_info::ChainStatus; use starcoin_types::U256; +use std::result::Result::Ok; use std::str::FromStr; use std::sync::mpsc::Sender; use std::sync::Arc; @@ -38,9 +38,18 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF if let Some(best_peers) = self.peer_selector().bests(min_difficulty) { // to do, here simply returns the accumulator info containing longest leaves let result = match best_peers.into_iter().max_by_key(|peer_info| { - peer_info.chain_state_info.dag_status.flexi_dag_accumulator_info.num_leaves + peer_info + .chain_state_info + .dag_status + .flexi_dag_accumulator_info + .num_leaves }) { - Some(peer_info) => Ok(Some(peer_info.chain_state_info.dag_status.flexi_dag_accumulator_info)), + Some(peer_info) => Ok(Some( + peer_info + .chain_state_info + .dag_status + .flexi_dag_accumulator_info, + )), None => { debug!("failed to find the best dag target"); return Ok(None); @@ -71,8 +80,10 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF .unwrap_or(false); if !update { - chain_statuses - .push((peer.chain_info().chain_info.status().clone(), vec![peer.peer_id()])) + chain_statuses.push(( + peer.chain_info().chain_info.status().clone(), + vec![peer.peer_id()], + )) } chain_statuses }); @@ -137,7 +148,12 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF && best_target.peers.contains(&better_peer.peer_id()) { target = Some(( - better_peer.chain_state_info.chain_info.status().info().clone(), + better_peer + .chain_state_info + .chain_info + .status() + .info() + .clone(), BlockIdAndNumber { number: better_peer.latest_header().number(), id: better_peer.latest_header().id(), @@ -153,8 +169,14 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF { let mut block_info = None; if block_id == better_peer.block_id() { - block_info = - Some(better_peer.chain_state_info.chain_info.status().info().clone()); + block_info = Some( + better_peer + .chain_state_info + .chain_info + .status() + .info() + .clone(), + ); } else if let Some(better_block_id) = self .fetch_block_id( Some(better_peer.peer_id()), @@ -530,6 +552,7 @@ mod inner_sync_task; pub(crate) mod mock; mod sync_dag_accumulator_task; mod sync_dag_block_task; +mod sync_dag_full_task; mod sync_dag_protocol_trait; mod sync_find_ancestor_task; #[cfg(test)] @@ -540,6 +563,7 @@ pub use accumulator_sync_task::{AccumulatorCollector, BlockAccumulatorSyncTask}; pub use block_sync_task::{BlockCollector, BlockSyncTask}; 
pub use find_ancestor_task::{AncestorCollector, FindAncestorTask}; use starcoin_executor::VMMetrics; +pub use sync_dag_full_task::sync_dag_full_task; pub fn full_sync_task( current_block_id: HashValue, diff --git a/sync/src/tasks/sync_dag_block_task.rs b/sync/src/tasks/sync_dag_block_task.rs index 4fbc5bcb03..d7de5b5db9 100644 --- a/sync/src/tasks/sync_dag_block_task.rs +++ b/sync/src/tasks/sync_dag_block_task.rs @@ -2,10 +2,6 @@ use anyhow::{Ok, Result}; use futures::{future::BoxFuture, FutureExt}; use starcoin_accumulator::{accumulator_info::AccumulatorInfo, MerkleAccumulator}; use starcoin_network_rpc_api::dag_protocol::SyncDagBlockInfo; -use starcoin_storage::{ - flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage}, - storage::CodecKVStore, -}; use std::sync::Arc; use stream_task::{CollectorState, TaskResultCollector, TaskState}; @@ -90,7 +86,7 @@ impl SyncDagBlockCollector { impl TaskResultCollector for SyncDagBlockCollector { type Output = (); - fn collect(&mut self, mut item: SyncDagBlockInfo) -> anyhow::Result { + fn collect(&mut self, mut _item: SyncDagBlockInfo) -> anyhow::Result { Ok(CollectorState::Enough) } diff --git a/sync/src/tasks/sync_dag_full_task.rs b/sync/src/tasks/sync_dag_full_task.rs new file mode 100644 index 0000000000..3ecb4fbb43 --- /dev/null +++ b/sync/src/tasks/sync_dag_full_task.rs @@ -0,0 +1,85 @@ +use std::sync::Arc; + +use starcoin_accumulator::{ + accumulator_info::AccumulatorInfo, node::AccumulatorStoreType, AccumulatorTreeStore, + MerkleAccumulator, +}; +use starcoin_service_registry::ServiceContext; +use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, Storage, Store, SyncFlexiDagStore}; +use stream_task::{Generator, TaskEventCounterHandle, TaskGenerator}; + +use crate::{sync::SyncService, verified_rpc_client::VerifiedRpcClient}; + +use super::{ + sync_find_ancestor_task::{AncestorCollector, FindAncestorTask}, + ExtSyncTaskErrorHandle, +}; + +pub fn find_dag_ancestor_task( + local_accumulator_info: AccumulatorInfo, + target_accumulator_info: AccumulatorInfo, + fetcher: Arc, + accumulator_store: Arc, + accumulator_snapshot: Arc, +) -> anyhow::Result { + let max_retry_times = 10; // in startcoin, it is in config + let delay_milliseconds_on_error = 100; + + let event_handle = Arc::new(TaskEventCounterHandle::new()); + + let ext_error_handle = Arc::new(ExtSyncTaskErrorHandle::new(fetcher.clone())); + + let find_ancestor_task = async_std::task::spawn(async move { + // here should compare the dag's node not accumulator leaf node + let sync_task = TaskGenerator::new( + FindAncestorTask::new( + local_accumulator_info.num_leaves - 1, + target_accumulator_info.num_leaves, + fetcher, + ), + 2, + max_retry_times, + delay_milliseconds_on_error, + AncestorCollector::new( + Arc::new(MerkleAccumulator::new_with_info( + local_accumulator_info, + accumulator_store.clone(), + )), + accumulator_snapshot.clone(), + ), + event_handle.clone(), + ext_error_handle.clone(), + ) + .generate(); + let (fut, _handle) = sync_task.with_handle(); + match fut.await { + anyhow::Result::Ok(ancestor) => { + println!("receive ancestor {:?}", ancestor); + return Ok(ancestor); + } + Err(error) => { + println!("an error happened: {}", error.to_string()); + return Err(error.into()); + } + } + }); + return async_std::task::block_on(find_ancestor_task); +} + +pub fn sync_dag_full_task( + local_accumulator_info: AccumulatorInfo, + target_accumulator_info: AccumulatorInfo, + fetcher: Arc, + accumulator_store: Arc, + accumulator_snapshot: Arc, +) { + async move { + let ancestor 
= find_dag_ancestor_task( + local_accumulator_info, + target_accumulator_info, + fetcher, + accumulator_store, + accumulator_snapshot, + ); + }; +} diff --git a/sync/src/tasks/sync_find_ancestor_task.rs b/sync/src/tasks/sync_find_ancestor_task.rs index fbf648152e..5206c2ef0c 100644 --- a/sync/src/tasks/sync_find_ancestor_task.rs +++ b/sync/src/tasks/sync_find_ancestor_task.rs @@ -1,27 +1,28 @@ use anyhow::{format_err, Result}; use futures::{future::BoxFuture, FutureExt}; use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; -use starcoin_network_rpc_api::dag_protocol::TargetDagAccumulatorLeaf; +use starcoin_network_rpc_api::dag_protocol::{self, TargetDagAccumulatorLeaf}; use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, storage::CodecKVStore}; use std::sync::Arc; use stream_task::{CollectorState, TaskResultCollector, TaskState}; -use super::sync_dag_protocol_trait::PeerSynDagAccumulator; +use crate::verified_rpc_client::VerifiedRpcClient; #[derive(Clone)] pub struct FindAncestorTask { start_leaf_number: u64, - fetcher: Arc, + fetcher: Arc, batch_size: u64, } impl FindAncestorTask { - pub(crate) fn new(current_leaf_numeber: u64, target_leaf_numeber: u64, fetcher: F) -> Self - where - F: PeerSynDagAccumulator + 'static, - { + pub(crate) fn new( + current_leaf_numeber: u64, + target_leaf_numeber: u64, + fetcher: Arc, + ) -> Self { FindAncestorTask { start_leaf_number: std::cmp::min(current_leaf_numeber, target_leaf_numeber), - fetcher: Arc::new(fetcher), + fetcher, batch_size: 3, } } @@ -32,10 +33,12 @@ impl TaskState for FindAncestorTask { fn new_sub_task(self) -> BoxFuture<'static, Result>> { async move { - let current_number = self.start_leaf_number; let target_accumulator_leaves = self .fetcher - .get_sync_dag_asccumulator_leaves(None, self.start_leaf_number, self.batch_size) + .get_dag_accumulator_leaves(dag_protocol::GetDagAccumulatorLeaves { + accumulator_leaf_index: self.start_leaf_number, + batch_size: self.batch_size, + }) .await?; Ok(target_accumulator_leaves) } diff --git a/test-helper/src/network.rs b/test-helper/src/network.rs index 2b3ea61fb4..8a67db9cdf 100644 --- a/test-helper/src/network.rs +++ b/test-helper/src/network.rs @@ -16,8 +16,8 @@ use starcoin_service_registry::{ RegistryAsyncService, RegistryService, ServiceContext, ServiceFactory, ServiceRef, }; use starcoin_storage::block_info::BlockInfoStore; -use starcoin_storage::{BlockStore, Storage, DagBlockStore}; -use starcoin_types::startup_info::{ChainInfo, ChainStatus, DagChainStatus, ChainStateInfo}; +use starcoin_storage::{BlockStore, DagBlockStore, Storage}; +use starcoin_types::startup_info::{ChainInfo, ChainStateInfo, ChainStatus, DagChainStatus}; use std::any::Any; use std::borrow::Cow; use std::sync::{Arc, Mutex}; @@ -199,7 +199,7 @@ impl ServiceFactory for MockNetworkServiceFactory { chain_info: ChainInfo::new(config.net().chain_id(), genesis_hash, chain_status.clone()), dag_status: DagChainStatus { flexi_dag_accumulator_info: storage.get_dag_accumulator_info()?, - } + }, }; let actor_service = NetworkActorService::new(config, chain_state_info, rpc, peer_message_handle.clone())?; diff --git a/types/src/startup_info.rs b/types/src/startup_info.rs index fe57b026d8..7a4db59306 100644 --- a/types/src/startup_info.rs +++ b/types/src/startup_info.rs @@ -187,7 +187,7 @@ impl Sample for ChainStatus { #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)] pub struct DagChainStatus { pub flexi_dag_accumulator_info: AccumulatorInfo, -} +} impl 
DagChainStatus { pub fn new(flexi_dag_accumulator_info: AccumulatorInfo) -> Self { @@ -204,7 +204,8 @@ impl DagChainStatus { vec![], rand::random::(), rand::random::(), - )} + ), + } } pub fn sample() -> Self { From 543d38b55347d61249fb297d407bc304eb303fd1 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 28 Jul 2023 17:09:07 +0800 Subject: [PATCH 25/30] add sync dag accumulator task --- chain/api/src/service.rs | 8 +- network-rpc/api/src/lib.rs | 2 +- network-rpc/src/rpc.rs | 2 +- sync/src/tasks/sync_dag_accumulator_task.rs | 18 +-- sync/src/tasks/sync_dag_full_task.rs | 115 +++++++++++++++++--- sync/src/verified_rpc_client.rs | 10 +- 6 files changed, 123 insertions(+), 32 deletions(-) diff --git a/chain/api/src/service.rs b/chain/api/src/service.rs index 1529b22e28..9f9a2ce8e9 100644 --- a/chain/api/src/service.rs +++ b/chain/api/src/service.rs @@ -147,7 +147,7 @@ pub trait ChainAsyncService: async fn get_dag_accumulator_leaves_detail( &self, req: dag_protocol::GetTargetDagAccumulatorLeafDetail, - ) -> Result>; + ) -> Result>>; } #[async_trait::async_trait] @@ -209,7 +209,7 @@ where async fn get_dag_accumulator_leaves_detail( &self, req: dag_protocol::GetTargetDagAccumulatorLeafDetail, - ) -> Result> { + ) -> Result>> { if let ChainResponse::TargetDagAccumulatorLeafDetail(details) = self .send(ChainRequest::GetTargetDagAccumulatorLeafDetail { leaf_index: req.leaf_index, @@ -217,9 +217,9 @@ where }) .await?? { - Ok(details) + Ok(Some(details)) } else { - bail!("get_dag_accumulator_leaves response type error.") + Ok(None) } } diff --git a/network-rpc/api/src/lib.rs b/network-rpc/api/src/lib.rs index 1dafb55f58..7dfb73bfe5 100644 --- a/network-rpc/api/src/lib.rs +++ b/network-rpc/api/src/lib.rs @@ -297,7 +297,7 @@ pub trait NetworkRpc: Sized + Send + Sync + 'static { &self, peer_id: PeerId, req: dag_protocol::GetTargetDagAccumulatorLeafDetail, - ) -> BoxFuture>>; + ) -> BoxFuture>>>; fn get_dag_block_info( &self, peer_id: PeerId, diff --git a/network-rpc/src/rpc.rs b/network-rpc/src/rpc.rs index 5c80f47d9d..52cd60e448 100644 --- a/network-rpc/src/rpc.rs +++ b/network-rpc/src/rpc.rs @@ -321,7 +321,7 @@ impl gen_server::NetworkRpc for NetworkRpcImpl { &self, _peer_id: PeerId, req: dag_protocol::GetTargetDagAccumulatorLeafDetail, - ) -> BoxFuture>> { + ) -> BoxFuture>>> { let chain_service = self.chain_service.clone(); let fut = async move { chain_service.get_dag_accumulator_leaves_detail(req).await }; Box::pin(fut) diff --git a/sync/src/tasks/sync_dag_accumulator_task.rs b/sync/src/tasks/sync_dag_accumulator_task.rs index c27b06132c..a304cf4819 100644 --- a/sync/src/tasks/sync_dag_accumulator_task.rs +++ b/sync/src/tasks/sync_dag_accumulator_task.rs @@ -3,7 +3,7 @@ use bcs_ext::BCSCodec; use futures::{future::BoxFuture, FutureExt}; use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; use starcoin_crypto::HashValue; -use starcoin_network_rpc_api::dag_protocol::TargetDagAccumulatorLeafDetail; +use starcoin_network_rpc_api::dag_protocol::{TargetDagAccumulatorLeafDetail, self}; use starcoin_storage::{ flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage}, storage::CodecKVStore, @@ -11,6 +11,8 @@ use starcoin_storage::{ use std::sync::Arc; use stream_task::{CollectorState, TaskResultCollector, TaskState}; +use crate::verified_rpc_client::VerifiedRpcClient; + use super::sync_dag_protocol_trait::PeerSynDagAccumulator; #[derive(Clone)] @@ -18,18 +20,15 @@ pub struct SyncDagAccumulatorTask { leaf_index: u64, batch_size: u64, target_index: u64, - 
fetcher: Arc, + fetcher: Arc, } impl SyncDagAccumulatorTask { - pub fn new(leaf_index: u64, batch_size: u64, target_index: u64, fetcher: F) -> Self - where - F: PeerSynDagAccumulator + 'static, - { + pub fn new(leaf_index: u64, batch_size: u64, target_index: u64, fetcher: Arc) -> Self { SyncDagAccumulatorTask { leaf_index, batch_size, target_index, - fetcher: Arc::new(fetcher), + fetcher, } } } @@ -41,7 +40,10 @@ impl TaskState for SyncDagAccumulatorTask { async move { let target_details = match self .fetcher - .get_accumulator_leaf_detail(None, self.leaf_index, self.batch_size) + .get_accumulator_leaf_detail(dag_protocol::GetTargetDagAccumulatorLeafDetail { + leaf_index: self.leaf_index, + batch_size: self.batch_size, + }) .await? { Some(details) => details, diff --git a/sync/src/tasks/sync_dag_full_task.rs b/sync/src/tasks/sync_dag_full_task.rs index 3ecb4fbb43..d0c8b6df7a 100644 --- a/sync/src/tasks/sync_dag_full_task.rs +++ b/sync/src/tasks/sync_dag_full_task.rs @@ -1,18 +1,17 @@ use std::sync::Arc; use starcoin_accumulator::{ - accumulator_info::AccumulatorInfo, node::AccumulatorStoreType, AccumulatorTreeStore, - MerkleAccumulator, + accumulator_info::AccumulatorInfo, AccumulatorTreeStore, + MerkleAccumulator, Accumulator, }; -use starcoin_service_registry::ServiceContext; -use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, Storage, Store, SyncFlexiDagStore}; +use starcoin_storage::flexi_dag::SyncFlexiDagSnapshotStorage; use stream_task::{Generator, TaskEventCounterHandle, TaskGenerator}; -use crate::{sync::SyncService, verified_rpc_client::VerifiedRpcClient}; +use crate::verified_rpc_client::VerifiedRpcClient; use super::{ sync_find_ancestor_task::{AncestorCollector, FindAncestorTask}, - ExtSyncTaskErrorHandle, + ExtSyncTaskErrorHandle, sync_dag_accumulator_task::{SyncDagAccumulatorTask, SyncDagAccumulatorCollector}, }; pub fn find_dag_ancestor_task( @@ -66,20 +65,104 @@ pub fn find_dag_ancestor_task( return async_std::task::block_on(find_ancestor_task); } +fn sync_accumulator( + local_accumulator_info: AccumulatorInfo, + target_accumulator_info: AccumulatorInfo, + fetcher: Arc, + accumulator_store: Arc, + accumulator_snapshot: Arc +) -> anyhow::Result<()> { + let max_retry_times = 10; // in startcoin, it is in config + let delay_milliseconds_on_error = 100; + + let start_index = local_accumulator_info.get_num_leaves().saturating_sub(1); + + let event_handle = Arc::new(TaskEventCounterHandle::new()); + + let ext_error_handle = Arc::new(ExtSyncTaskErrorHandle::new(fetcher.clone())); + + let sync = async_std::task::spawn(async move { + let sync_task = TaskGenerator::new( + SyncDagAccumulatorTask::new( + start_index.saturating_add(1), + 3, + target_accumulator_info.num_leaves, + fetcher.clone(), + ), + 2, + max_retry_times, + delay_milliseconds_on_error, + SyncDagAccumulatorCollector::new( + MerkleAccumulator::new_with_info(local_accumulator_info, accumulator_store.clone()), + accumulator_snapshot.clone(), + target_accumulator_info, + start_index, + ), + event_handle.clone(), + ext_error_handle, + ).generate(); + let (fut, handle) = sync_task.with_handle(); + match fut.await { + anyhow::Result::Ok((start_index, full_accumulator)) => { + println!("start index: {}, full accumulator info is {:?}", start_index, full_accumulator.get_info()); + return anyhow::Result::Ok((start_index, full_accumulator)); + } + Err(error) => { + println!("an error happened: {}", error.to_string()); + return Err(error); + } + } + + // TODO: we need to talk about this + // 
.and_then(|sync_accumulator_result, event_handle| { + // let sync_dag_accumulator_task = TaskGenerator::new( + // SyncDagBlockTask::new(), + // 2, + // max_retry_times, + // delay_milliseconds_on_error, + // SyncDagAccumulatorCollector::new(), + // event_handle.clone(), + // ext_error_handle, + // ); + // Ok(sync_dag_accumulator_task) + // }); + }); + // return Ok(async_std::task::block_on(sync)); + match async_std::task::block_on(sync) { + std::result::Result::Ok(result) => { + println!("sync accumulator success"); + return Ok(()); + } + Err(error) => { + println!("sync accumulator error: {}", error.to_string()); + Err(error.into()) + } + } +} + pub fn sync_dag_full_task( local_accumulator_info: AccumulatorInfo, target_accumulator_info: AccumulatorInfo, fetcher: Arc, accumulator_store: Arc, accumulator_snapshot: Arc, -) { - async move { - let ancestor = find_dag_ancestor_task( - local_accumulator_info, - target_accumulator_info, - fetcher, - accumulator_store, - accumulator_snapshot, - ); - }; +) -> anyhow::Result<()> { + + let ancestor = find_dag_ancestor_task( + local_accumulator_info, + target_accumulator_info.clone(), + fetcher.clone(), + accumulator_store.clone(), + accumulator_snapshot.clone(), + )?; + + sync_accumulator( + ancestor, + target_accumulator_info, + fetcher.clone(), + accumulator_store.clone(), + accumulator_snapshot.clone(), + )?; + + Ok(()) } diff --git a/sync/src/verified_rpc_client.rs b/sync/src/verified_rpc_client.rs index ff4fc0d5d4..7406ad770e 100644 --- a/sync/src/verified_rpc_client.rs +++ b/sync/src/verified_rpc_client.rs @@ -424,9 +424,15 @@ impl VerifiedRpcClient { pub async fn get_accumulator_leaf_detail( &self, req: dag_protocol::GetTargetDagAccumulatorLeafDetail, - ) -> Result> { + ) -> Result>> { let peer_id = self.select_a_peer()?; - self.client.get_accumulator_leaf_detail(peer_id, req).await + match self.client.get_accumulator_leaf_detail(peer_id, req).await { + Ok(result) => Ok(result), + Err(error) => { + warn!("get_accumulator_leaf_detail return None, error: {}", error.to_string()); + Ok(None) + }, + } } pub async fn get_dag_block_info( From 75445881f644d796461b45152b93277f68ec29fc Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 28 Jul 2023 17:31:35 +0800 Subject: [PATCH 26/30] add dbupgrade --- storage/src/upgrade.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index ac27e111f9..6d88b855fe 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -189,7 +189,11 @@ impl DBUpgrade { (StorageVersion::V2, StorageVersion::V3) => { Self::db_upgrade_v2_v3(instance)?; } - (StorageVersion::V3, StorageVersion::V4) => { + (StorageVersion::V3, StorageVersion::V4) | + (StorageVersion::V1, StorageVersion::V4) | + (StorageVersion::V2, StorageVersion::V4) + => { + // just for testing. 
todo Self::db_upgrade_v3_v4(instance)?; } _ => bail!( From 23547cda0a2c9f1847b16c84b01a178a15b861f4 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 31 Jul 2023 11:56:27 +0800 Subject: [PATCH 27/30] merge dag form simon --- Cargo.lock | 1 + consensus/Cargo.toml | 1 + consensus/src/consensusdb/access.rs | 174 +++++++----------- consensus/src/consensusdb/cache.rs | 44 +++++ consensus/src/consensusdb/cache/mod.rs | 15 -- consensus/src/consensusdb/cache/stc_cache.rs | 45 ----- .../src/consensusdb/consensus_ghostdag.rs | 65 ++++++- consensus/src/consensusdb/consensus_header.rs | 69 +++++-- .../src/consensusdb/consensus_reachability.rs | 66 ++++++- .../src/consensusdb/consensus_relations.rs | 55 +++++- consensus/src/consensusdb/db.rs | 12 +- .../src/consensusdb/{errors.rs => error.rs} | 7 +- consensus/src/consensusdb/item.rs | 59 +++--- consensus/src/consensusdb/mod.rs | 11 +- consensus/src/consensusdb/schema.rs | 40 ++++ consensus/src/consensusdb/writer.rs | 33 ++-- consensus/src/dag/blockdag.rs | 2 +- consensus/src/dag/ghostdag/mergeset.rs | 2 +- consensus/src/dag/ghostdag/protocol.rs | 22 ++- consensus/src/dag/reachability/extensions.rs | 12 +- consensus/src/dag/reachability/inquirer.rs | 17 +- .../dag/reachability/reachability_service.rs | 4 +- consensus/src/dag/reachability/reindex.rs | 93 +++++++--- .../src/dag/reachability/relations_service.rs | 2 +- consensus/src/dag/reachability/tests.rs | 2 +- consensus/src/dag/reachability/tree.rs | 22 ++- consensus/src/dag/types/ghostdata.rs | 11 +- consensus/src/dag/types/interval.rs | 56 ++++-- consensus/src/dag/types/reachability.rs | 2 +- consensus/src/lib.rs | 1 + storage/src/batch/mod.rs | 14 +- storage/src/cache_storage/mod.rs | 93 +++++----- storage/src/upgrade.rs | 1 + types/src/header.rs | 6 +- types/src/startup_info.rs | 1 - 35 files changed, 669 insertions(+), 391 deletions(-) create mode 100644 consensus/src/consensusdb/cache.rs delete mode 100644 consensus/src/consensusdb/cache/mod.rs delete mode 100644 consensus/src/consensusdb/cache/stc_cache.rs rename consensus/src/consensusdb/{errors.rs => error.rs} (91%) create mode 100644 consensus/src/consensusdb/schema.rs diff --git a/Cargo.lock b/Cargo.lock index 1bbf21a1fb..480a86667f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9400,6 +9400,7 @@ name = "starcoin-consensus" version = "1.13.5" dependencies = [ "anyhow", + "bcs-ext", "bincode", "byteorder", "cryptonight-rs", diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 4bbd48dd3e..bcc9d32604 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -26,6 +26,7 @@ starcoin-storage = { workspace = true } parking_lot = { workspace = true } itertools = { workspace = true } starcoin-config = { workspace = true } +bcs-ext = { workspace = true } [dev-dependencies] proptest = { workspace = true } diff --git a/consensus/src/consensusdb/access.rs b/consensus/src/consensusdb/access.rs index 999da966ca..e46e85acfe 100644 --- a/consensus/src/consensusdb/access.rs +++ b/consensus/src/consensusdb/access.rs @@ -1,9 +1,9 @@ -use super::{cache::DagCache, db::DBStorage, errors::StoreError}; +use super::{cache::DagCache, db::DBStorage, error::StoreError}; -use super::prelude::{Cache, DbWriter}; +use super::prelude::DbWriter; +use super::schema::{KeyCodec, Schema, ValueCodec}; use itertools::Itertools; use rocksdb::{Direction, IteratorMode, ReadOptions}; -use serde::{de::DeserializeOwned, Serialize}; use starcoin_storage::storage::RawDBStorage; use std::{ collections::hash_map::RandomState, error::Error, hash::BuildHasher, 
marker::PhantomData, @@ -12,94 +12,72 @@ use std::{ /// A concurrent DB store access with typed caching. #[derive(Clone)] -pub struct CachedDbAccess -where - TKey: Clone + std::hash::Hash + Eq + Send + Sync + AsRef<[u8]>, - TData: Clone + Send + Sync + DeserializeOwned, -{ +pub struct CachedDbAccess { db: Arc, // Cache - cache: Cache, + cache: DagCache, - // DB bucket/path - prefix: &'static str, - - _phantom: PhantomData<(TData, S)>, + _phantom: PhantomData, } -impl CachedDbAccess +impl CachedDbAccess where - TKey: Clone + std::hash::Hash + Eq + Send + Sync + AsRef<[u8]>, - TData: Clone + Send + Sync + DeserializeOwned, - S: BuildHasher + Default, + R: BuildHasher + Default, { - pub fn new(db: Arc, cache_size: u64, prefix: &'static str) -> Self { + pub fn new(db: Arc, cache_size: u64) -> Self { Self { db, - cache: Cache::new_with_capacity(cache_size), - prefix, + cache: DagCache::new_with_capacity(cache_size), _phantom: Default::default(), } } - pub fn read_from_cache(&self, key: TKey) -> Result, StoreError> - where - TKey: Copy + AsRef<[u8]>, - { - self.cache - .get(&key) - .map(|b| bincode::deserialize(&b).map_err(StoreError::DeserializationError)) - .transpose() + pub fn read_from_cache(&self, key: S::Key) -> Option { + self.cache.get(&key) } - pub fn has(&self, key: TKey) -> Result - where - TKey: Clone + AsRef<[u8]>, - { + pub fn has(&self, key: S::Key) -> Result { Ok(self.cache.contains_key(&key) || self .db - .raw_get_pinned_cf(self.prefix, key) - .map_err(|_| StoreError::CFNotExist(self.prefix.to_string()))? + .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap()) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? .is_some()) } - pub fn read(&self, key: TKey) -> Result - where - TKey: Clone + AsRef<[u8]> + ToString, - TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned_cf` has short lifetime - { + pub fn read(&self, key: S::Key) -> Result { if let Some(data) = self.cache.get(&key) { - let data = bincode::deserialize(&data)?; Ok(data) } else if let Some(slice) = self .db - .raw_get_pinned_cf(self.prefix, &key) - .map_err(|_| StoreError::CFNotExist(self.prefix.to_string()))? + .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap()) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? 
{ - let data: TData = bincode::deserialize(&slice)?; - self.cache.insert(key, slice.to_vec()); + let data = S::Value::decode_value(slice.as_ref()) + .map_err(|o| StoreError::DecodeError(o.to_string()))?; + self.cache.insert(key, data.clone()); Ok(data) } else { - Err(StoreError::KeyNotFound(key.to_string())) + Err(StoreError::KeyNotFound("".to_string())) } } pub fn iterator( &self, - ) -> Result, TData), Box>> + '_, StoreError> - where - TKey: Clone + AsRef<[u8]>, - TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned_cf` has short lifetime + ) -> Result, S::Value), Box>> + '_, StoreError> { let db_iterator = self .db - .raw_iterator_cf_opt(self.prefix, IteratorMode::Start, ReadOptions::default()) + .raw_iterator_cf_opt( + S::COLUMN_FAMILY, + IteratorMode::Start, + ReadOptions::default(), + ) .map_err(|e| StoreError::CFNotExist(e.to_string()))?; Ok(db_iterator.map(|iter_result| match iter_result { - Ok((key, data_bytes)) => match bincode::deserialize(&data_bytes) { + Ok((key, data_bytes)) => match S::Value::decode_value(&data_bytes) { Ok(data) => Ok((key, data)), Err(e) => Err(e.into()), }, @@ -107,30 +85,25 @@ where })) } - pub fn write(&self, mut writer: impl DbWriter, key: TKey, data: TData) -> Result<(), StoreError> - where - TKey: Clone + AsRef<[u8]>, - TData: Serialize, - { - let bin_data = bincode::serialize(&data)?; - self.cache.insert(key.clone(), bin_data.clone()); - writer.put(self.prefix, key.as_ref(), bin_data)?; + pub fn write( + &self, + mut writer: impl DbWriter, + key: S::Key, + data: S::Value, + ) -> Result<(), StoreError> { + writer.put::(&key, &data)?; + self.cache.insert(key, data); Ok(()) } pub fn write_many( &self, mut writer: impl DbWriter, - iter: &mut (impl Iterator + Clone), - ) -> Result<(), StoreError> - where - TKey: Clone + AsRef<[u8]>, - TData: Serialize, - { + iter: &mut (impl Iterator + Clone), + ) -> Result<(), StoreError> { for (key, data) in iter { - let bin_data = bincode::serialize(&data)?; - self.cache.insert(key.clone(), bin_data.clone()); - writer.put(self.prefix, key.as_ref(), bin_data)?; + writer.put::(&key, &data)?; + self.cache.insert(key, data); } Ok(()) } @@ -139,54 +112,44 @@ where pub fn write_many_without_cache( &self, mut writer: impl DbWriter, - iter: &mut impl Iterator, - ) -> Result<(), StoreError> - where - TKey: Clone + AsRef<[u8]>, - TData: Serialize, - { + iter: &mut impl Iterator, + ) -> Result<(), StoreError> { for (key, data) in iter { - let bin_data = bincode::serialize(&data)?; - writer.put(self.prefix, key.as_ref(), bin_data)?; + writer.put::(&key, &data)?; } // The cache must be cleared in order to avoid invalidated entries self.cache.remove_all(); Ok(()) } - pub fn delete(&self, mut writer: impl DbWriter, key: TKey) -> Result<(), StoreError> - where - TKey: Clone + AsRef<[u8]>, - { + pub fn delete(&self, mut writer: impl DbWriter, key: S::Key) -> Result<(), StoreError> { self.cache.remove(&key); - writer.delete(self.prefix, key.as_ref())?; + writer.delete::(&key)?; Ok(()) } pub fn delete_many( &self, mut writer: impl DbWriter, - key_iter: &mut (impl Iterator + Clone), - ) -> Result<(), StoreError> - where - TKey: Clone + AsRef<[u8]>, - { + key_iter: &mut (impl Iterator + Clone), + ) -> Result<(), StoreError> { let key_iter_clone = key_iter.clone(); self.cache.remove_many(key_iter); for key in key_iter_clone { - writer.delete(self.prefix, key.as_ref())?; + writer.delete::(&key)?; } Ok(()) } - pub fn delete_all(&self, mut writer: impl DbWriter) -> Result<(), StoreError> - where - 
TKey: Clone + AsRef<[u8]>, - { + pub fn delete_all(&self, mut writer: impl DbWriter) -> Result<(), StoreError> { self.cache.remove_all(); let keys = self .db - .raw_iterator_cf_opt(self.prefix, IteratorMode::Start, ReadOptions::default()) + .raw_iterator_cf_opt( + S::COLUMN_FAMILY, + IteratorMode::Start, + ReadOptions::default(), + ) .map_err(|e| StoreError::CFNotExist(e.to_string()))? .map(|iter_result| match iter_result { Ok((key, _)) => Ok::<_, rocksdb::Error>(key), @@ -194,7 +157,7 @@ where }) .collect_vec(); for key in keys { - writer.delete(self.prefix, key?.as_ref())?; + writer.delete::(&S::Key::decode_key(&key?)?)?; } Ok(()) } @@ -203,24 +166,21 @@ where //TODO: loop and chain iterators for multi-prefix iterator. pub fn seek_iterator( &self, - seek_from: Option, // iter whole range if None - limit: usize, // amount to take. + seek_from: Option, // iter whole range if None + limit: usize, // amount to take. skip_first: bool, // skips the first value, (useful in conjunction with the seek-key, as to not re-retrieve). - ) -> Result, TData), Box>> + '_, StoreError> - where - TKey: Clone + AsRef<[u8]>, - TData: DeserializeOwned, + ) -> Result, S::Value), Box>> + '_, StoreError> { let read_opts = ReadOptions::default(); let mut db_iterator = match seek_from { Some(seek_key) => self.db.raw_iterator_cf_opt( - self.prefix, - IteratorMode::From(seek_key.as_ref(), Direction::Forward), + S::COLUMN_FAMILY, + IteratorMode::From(seek_key.encode_key()?.as_slice(), Direction::Forward), read_opts, ), None => self .db - .raw_iterator_cf_opt(self.prefix, IteratorMode::Start, read_opts), + .raw_iterator_cf_opt(S::COLUMN_FAMILY, IteratorMode::Start, read_opts), } .map_err(|e| StoreError::CFNotExist(e.to_string()))?; @@ -229,12 +189,10 @@ where } Ok(db_iterator.take(limit).map(move |item| match item { - Ok((key_bytes, value_bytes)) => { - match bincode::deserialize::(value_bytes.as_ref()) { - Ok(value) => Ok((key_bytes, value)), - Err(err) => Err(err.into()), - } - } + Ok((key_bytes, value_bytes)) => match S::Value::decode_value(value_bytes.as_ref()) { + Ok(value) => Ok((key_bytes, value)), + Err(err) => Err(err.into()), + }, Err(err) => Err(err.into()), })) } diff --git a/consensus/src/consensusdb/cache.rs b/consensus/src/consensusdb/cache.rs new file mode 100644 index 0000000000..e2d5de0c3c --- /dev/null +++ b/consensus/src/consensusdb/cache.rs @@ -0,0 +1,44 @@ +use core::hash::Hash; +use starcoin_storage::cache_storage::GCacheStorage; +use std::sync::Arc; + +#[derive(Clone)] +pub struct DagCache { + cache: Arc>, +} + +impl DagCache +where + K: Hash + Eq + Default, + V: Default + Clone, +{ + pub(crate) fn new_with_capacity(size: u64) -> Self { + Self { + cache: Arc::new(GCacheStorage::new_with_capacity(size as usize, None)), + } + } + + pub(crate) fn get(&self, key: &K) -> Option { + self.cache.get_inner(key) + } + + pub(crate) fn contains_key(&self, key: &K) -> bool { + self.get(key).is_some() + } + + pub(crate) fn insert(&self, key: K, data: V) { + self.cache.put_inner(key, data); + } + + pub(crate) fn remove(&self, key: &K) { + self.cache.remove_inner(key); + } + + pub(crate) fn remove_many(&self, key_iter: &mut impl Iterator) { + key_iter.for_each(|k| self.remove(&k)); + } + + pub(crate) fn remove_all(&self) { + self.cache.remove_all(); + } +} diff --git a/consensus/src/consensusdb/cache/mod.rs b/consensus/src/consensusdb/cache/mod.rs deleted file mode 100644 index ca7bcaa830..0000000000 --- a/consensus/src/consensusdb/cache/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -mod stc_cache; -pub use 
stc_cache::*; - -pub trait DagCache { - type TKey: Clone + std::hash::Hash + Eq + Send + Sync + AsRef<[u8]>; - type TData: Clone + Send + Sync + AsRef<[u8]>; - - fn new_with_capacity(size: u64) -> Self; - fn get(&self, key: &Self::TKey) -> Option; - fn contains_key(&self, key: &Self::TKey) -> bool; - fn insert(&self, key: Self::TKey, data: Self::TData); - fn remove(&self, key: &Self::TKey); - fn remove_many(&self, key_iter: &mut impl Iterator); - fn remove_all(&self); -} diff --git a/consensus/src/consensusdb/cache/stc_cache.rs b/consensus/src/consensusdb/cache/stc_cache.rs deleted file mode 100644 index 45b99dd550..0000000000 --- a/consensus/src/consensusdb/cache/stc_cache.rs +++ /dev/null @@ -1,45 +0,0 @@ -use super::DagCache; -use starcoin_storage::cache_storage::CacheStorage; -use std::{marker::PhantomData, sync::Arc}; - -#[derive(Clone)] -pub struct Cache { - cache: Arc, - _phantom: PhantomData, -} - -impl> DagCache for Cache { - type TKey = TKey; - type TData = Vec; - - fn new_with_capacity(size: u64) -> Self { - Self { - cache: Arc::new(CacheStorage::new_with_capacity(size as usize, None)), - _phantom: Default::default(), - } - } - - fn get(&self, key: &Self::TKey) -> Option { - self.cache.get_inner(None, key.as_ref().to_vec()) - } - - fn contains_key(&self, key: &Self::TKey) -> bool { - self.get(key).is_some() - } - - fn insert(&self, key: Self::TKey, data: Self::TData) { - self.cache.put_inner(None, key.as_ref().to_vec(), data); - } - - fn remove(&self, key: &Self::TKey) { - self.cache.remove_inner(None, key.as_ref().to_vec()); - } - - fn remove_many(&self, key_iter: &mut impl Iterator) { - key_iter.for_each(|k| self.remove(&k)); - } - - fn remove_all(&self) { - self.cache.remove_all(); - } -} diff --git a/consensus/src/consensusdb/consensus_ghostdag.rs b/consensus/src/consensusdb/consensus_ghostdag.rs index c66caaace0..a6746d9eb5 100644 --- a/consensus/src/consensusdb/consensus_ghostdag.rs +++ b/consensus/src/consensusdb/consensus_ghostdag.rs @@ -1,9 +1,15 @@ +use super::schema::{KeyCodec, ValueCodec}; use super::{ db::DBStorage, - errors::StoreError, + error::StoreError, prelude::{CachedDbAccess, DirectDbWriter}, writer::BatchDbWriter, }; +use crate::define_schema; +use starcoin_types::blockhash::{ + BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap, +}; + use crate::dag::types::{ ghostdata::{CompactGhostdagData, GhostdagData}, ordering::SortableBlock, @@ -14,9 +20,6 @@ use itertools::{ }; use rocksdb::WriteBatch; use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{ - BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap, -}; use std::{cell::RefCell, cmp, iter::once, sync::Arc}; pub trait GhostdagStoreReader { @@ -149,13 +152,59 @@ impl GhostDagDataWrapper { pub(crate) const GHOST_DAG_STORE_CF: &str = "block-ghostdag-data"; pub(crate) const COMPACT_GHOST_DAG_STORE_CF: &str = "compact-block-ghostdag-data"; +define_schema!(GhostDag, Hash, Arc, GHOST_DAG_STORE_CF); +define_schema!( + CompactGhostDag, + Hash, + CompactGhostdagData, + COMPACT_GHOST_DAG_STORE_CF +); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| 
StoreError::DecodeError(e.to_string())) + } +} + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for CompactGhostdagData { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + /// A DB + cache implementation of `GhostdagStore` trait, with concurrency support. #[derive(Clone)] pub struct DbGhostdagStore { db: Arc, level: BlockLevel, - access: CachedDbAccess>, - compact_access: CachedDbAccess, + access: CachedDbAccess, + compact_access: CachedDbAccess, } impl DbGhostdagStore { @@ -163,8 +212,8 @@ impl DbGhostdagStore { Self { db: Arc::clone(&db), level, - access: CachedDbAccess::new(db.clone(), cache_size, GHOST_DAG_STORE_CF), - compact_access: CachedDbAccess::new(db, cache_size, COMPACT_GHOST_DAG_STORE_CF), + access: CachedDbAccess::new(db.clone(), cache_size), + compact_access: CachedDbAccess::new(db, cache_size), } } diff --git a/consensus/src/consensusdb/consensus_header.rs b/consensus/src/consensusdb/consensus_header.rs index 97c3d49a98..3c512afa00 100644 --- a/consensus/src/consensusdb/consensus_header.rs +++ b/consensus/src/consensusdb/consensus_header.rs @@ -1,15 +1,17 @@ +use super::schema::{KeyCodec, ValueCodec}; use super::{ db::DBStorage, - errors::{StoreError, StoreResult}, + error::{StoreError, StoreResult}, prelude::CachedDbAccess, writer::{BatchDbWriter, DirectDbWriter}, }; +use crate::define_schema; use rocksdb::WriteBatch; use starcoin_crypto::HashValue as Hash; -use starcoin_types::U256; use starcoin_types::{ blockhash::BlockLevel, header::{CompactHeaderData, ConsensusHeader, Header, HeaderWithBlockLevel}, + U256, }; use std::sync::Arc; @@ -36,24 +38,65 @@ pub trait HeaderStore: HeaderStoreReader { pub(crate) const HEADERS_STORE_CF: &str = "headers-store"; pub(crate) const COMPACT_HEADER_DATA_STORE_CF: &str = "compact-header-data"; +define_schema!(BlockHeader, Hash, HeaderWithBlockLevel, HEADERS_STORE_CF); +define_schema!( + CompactBlockHeader, + Hash, + CompactHeaderData, + COMPACT_HEADER_DATA_STORE_CF +); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for HeaderWithBlockLevel { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for CompactHeaderData { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + /// A DB + cache implementation of `HeaderStore` trait, with concurrency support. 
#[derive(Clone)] pub struct DbHeadersStore { db: Arc, - compact_headers_access: CachedDbAccess, - headers_access: CachedDbAccess, + headers_access: CachedDbAccess, + compact_headers_access: CachedDbAccess, } impl DbHeadersStore { pub fn new(db: Arc, cache_size: u64) -> Self { Self { db: Arc::clone(&db), - compact_headers_access: CachedDbAccess::new( - Arc::clone(&db), - cache_size, - COMPACT_HEADER_DATA_STORE_CF, - ), - headers_access: CachedDbAccess::new(db, cache_size, HEADERS_STORE_CF), + headers_access: CachedDbAccess::new(db.clone(), cache_size), + compact_headers_access: CachedDbAccess::new(db, cache_size), } } @@ -110,14 +153,14 @@ impl HeaderStoreReader for DbHeadersStore { } fn get_timestamp(&self, hash: Hash) -> Result { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash)? { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { return Ok(header_with_block_level.header.timestamp()); } Ok(self.compact_headers_access.read(hash)?.timestamp) } fn get_difficulty(&self, hash: Hash) -> Result { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash)? { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { return Ok(header_with_block_level.header.difficulty()); } Ok(self.compact_headers_access.read(hash)?.difficulty) @@ -132,7 +175,7 @@ impl HeaderStoreReader for DbHeadersStore { } fn get_compact_header_data(&self, hash: Hash) -> Result { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash)? { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { return Ok(CompactHeaderData { timestamp: header_with_block_level.header.timestamp(), difficulty: header_with_block_level.header.difficulty(), diff --git a/consensus/src/consensusdb/consensus_reachability.rs b/consensus/src/consensusdb/consensus_reachability.rs index 551606c11f..308ffb88a8 100644 --- a/consensus/src/consensusdb/consensus_reachability.rs +++ b/consensus/src/consensusdb/consensus_reachability.rs @@ -5,10 +5,15 @@ use super::{ use starcoin_crypto::HashValue as Hash; use starcoin_storage::storage::RawDBStorage; -use crate::dag::types::{interval::Interval, reachability::ReachabilityData}; +use crate::{ + dag::types::{interval::Interval, reachability::ReachabilityData}, + define_schema, + schema::{KeyCodec, ValueCodec}, +}; +use starcoin_types::blockhash::{self, BlockHashMap, BlockHashes}; + use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard}; use rocksdb::WriteBatch; -use starcoin_types::blockhash::{self, BlockHashMap, BlockHashes}; use std::{collections::hash_map::Entry::Vacant, sync::Arc}; /// Reader API for `ReachabilityStore`. 
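// --- Editor's note: illustrative sketch, not part of this patch ---------------
// Shows how the pieces introduced in the diffs above are expected to compose:
// a schema declared with `define_schema!` (here the `GhostDag` schema from
// consensus_ghostdag.rs, keyed by `Hash` with `Arc<GhostdagData>` values), the
// `KeyCodec`/`ValueCodec` impls for those types, and a typed `CachedDbAccess`
// in front of the shared `DBStorage`. The constructor `DirectDbWriter::new(&db)`
// and the cache size `1024` are assumptions for illustration only; error
// handling is simplified.
fn ghostdag_round_trip(
    db: std::sync::Arc<DBStorage>,
    hash: Hash,
    data: std::sync::Arc<GhostdagData>,
) -> Result<(), StoreError> {
    // One cached accessor per schema; the column family comes from
    // `Schema::COLUMN_FAMILY`, the codecs from the Key/Value impls above.
    let access: CachedDbAccess<GhostDag> = CachedDbAccess::new(db.clone(), 1024);
    // Writes go through a `DbWriter`; `DirectDbWriter` applies them immediately,
    // while `BatchDbWriter` would stage them into a rocksdb `WriteBatch` instead.
    access.write(DirectDbWriter::new(&db), hash, data)?;
    // Reads hit the in-process `DagCache` first and fall back to rocksdb,
    // decoding through `ValueCodec::decode_value`.
    let _stored = access.read(hash)?;
    Ok(())
}
// ------------------------------------------------------------------------------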
@@ -48,12 +53,57 @@ const REINDEX_ROOT_KEY: &str = "reachability-reindex-root"; pub(crate) const REACHABILITY_DATA_CF: &str = "reachability-data"; // TODO: explore perf to see if using fixed-length constants for store prefixes is preferable +define_schema!( + Reachability, + Hash, + Arc, + REACHABILITY_DATA_CF +); +define_schema!(ReachabilityCache, Vec, Hash, REACHABILITY_DATA_CF); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Vec { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Ok(data.to_vec()) + } +} +impl ValueCodec for Hash { + fn encode_value(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_value(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + /// A DB + cache implementation of `ReachabilityStore` trait, with concurrent readers support. #[derive(Clone)] pub struct DbReachabilityStore { db: Arc, - access: CachedDbAccess>, - reindex_root: CachedDbItem, + access: CachedDbAccess, + reindex_root: CachedDbItem, } impl DbReachabilityStore { @@ -68,12 +118,8 @@ impl DbReachabilityStore { fn new_with_prefix_end(db: Arc, cache_size: u64) -> Self { Self { db: Arc::clone(&db), - access: CachedDbAccess::new(Arc::clone(&db), cache_size, REACHABILITY_DATA_CF), - reindex_root: CachedDbItem::new( - db, - REACHABILITY_DATA_CF, - REINDEX_ROOT_KEY.as_bytes().to_vec(), - ), + access: CachedDbAccess::new(Arc::clone(&db), cache_size), + reindex_root: CachedDbItem::new(db, REINDEX_ROOT_KEY.as_bytes().to_vec()), } } diff --git a/consensus/src/consensusdb/consensus_relations.rs b/consensus/src/consensusdb/consensus_relations.rs index ec1456ccf4..a34c1c049c 100644 --- a/consensus/src/consensusdb/consensus_relations.rs +++ b/consensus/src/consensusdb/consensus_relations.rs @@ -1,7 +1,9 @@ +use super::schema::{KeyCodec, ValueCodec}; use super::{ db::DBStorage, prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter, StoreError}, }; +use crate::define_schema; use rocksdb::WriteBatch; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlockLevel}; @@ -25,13 +27,54 @@ pub trait RelationsStore: RelationsStoreReader { pub(crate) const PARENTS_CF: &str = "block-parents"; pub(crate) const CHILDREN_CF: &str = "block-children"; +define_schema!(RelationParent, Hash, Arc>, PARENTS_CF); +define_schema!(RelationChildren, Hash, Arc>, CHILDREN_CF); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc> { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + 
Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +impl ValueCodec for Arc> { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + /// A DB + cache implementation of `RelationsStore` trait, with concurrent readers support. #[derive(Clone)] pub struct DbRelationsStore { db: Arc, level: BlockLevel, - parents_access: CachedDbAccess>>, - children_access: CachedDbAccess>>, + parents_access: CachedDbAccess, + children_access: CachedDbAccess, } impl DbRelationsStore { @@ -39,8 +82,8 @@ impl DbRelationsStore { Self { db: Arc::clone(&db), level, - parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size, PARENTS_CF), - children_access: CachedDbAccess::new(db, cache_size, CHILDREN_CF), + parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size), + children_access: CachedDbAccess::new(db, cache_size), } } @@ -203,8 +246,8 @@ impl RelationsStore for MemoryRelationsStore { mod tests { use super::*; use crate::consensusdb::{ - db::{FlexiDagStorageConfig, RelationsStoreConfig}, - prelude::FlexiDagStorage, + db::RelationsStoreConfig, + prelude::{FlexiDagStorage, FlexiDagStorageConfig}, }; #[test] diff --git a/consensus/src/consensusdb/db.rs b/consensus/src/consensusdb/db.rs index be6fc35cac..331df80277 100644 --- a/consensus/src/consensusdb/db.rs +++ b/consensus/src/consensusdb/db.rs @@ -1,8 +1,10 @@ -use super::errors::StoreError; -use super::schema::{ - DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, - COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, HEADERS_STORE_CF, - PARENTS_CF, REACHABILITY_DATA_CF, +use super::{ + error::StoreError, + schemadb::{ + DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, + COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, + HEADERS_STORE_CF, PARENTS_CF, REACHABILITY_DATA_CF, + }, }; use starcoin_config::RocksdbConfig; pub(crate) use starcoin_storage::db_storage::DBStorage; diff --git a/consensus/src/consensusdb/errors.rs b/consensus/src/consensusdb/error.rs similarity index 91% rename from consensus/src/consensusdb/errors.rs rename to consensus/src/consensusdb/error.rs index 1ca1683317..ff2c199c93 100644 --- a/consensus/src/consensusdb/errors.rs +++ b/consensus/src/consensusdb/error.rs @@ -17,8 +17,11 @@ pub enum StoreError { #[error("rocksdb error {0}")] DbError(#[from] rocksdb::Error), - #[error("bincode error {0}")] - DeserializationError(#[from] Box), + #[error("encode error {0}")] + EncodeError(String), + + #[error("decode error {0}")] + DecodeError(String), #[error("ghostdag {0} duplicate blocks")] DAGDupBlocksError(String), diff --git a/consensus/src/consensusdb/item.rs b/consensus/src/consensusdb/item.rs index 14cff7469b..0d27b9c347 100644 --- a/consensus/src/consensusdb/item.rs +++ b/consensus/src/consensusdb/item.rs @@ -1,96 +1,81 @@ -use super::{db::DBStorage, errors::StoreError}; - use super::prelude::DbWriter; +use super::schema::{KeyCodec, Schema, ValueCodec}; +use super::{db::DBStorage, error::StoreError}; use parking_lot::RwLock; -use serde::{de::DeserializeOwned, Serialize}; use starcoin_storage::storage::RawDBStorage; use std::sync::Arc; /// A cached DB item with concurrency support #[derive(Clone)] -pub 
struct CachedDbItem { +pub struct CachedDbItem { db: Arc, - key: Vec, - prefix: &'static str, - cached_item: Arc>>, + key: S::Key, + cached_item: Arc>>, } -impl CachedDbItem { - pub fn new(db: Arc, prefix: &'static str, key: Vec) -> Self { +impl CachedDbItem { + pub fn new(db: Arc, key: S::Key) -> Self { Self { db, key, - prefix, cached_item: Arc::new(RwLock::new(None)), } } - pub fn read(&self) -> Result - where - T: Clone + DeserializeOwned, - { + pub fn read(&self) -> Result { if let Some(item) = self.cached_item.read().clone() { return Ok(item); } if let Some(slice) = self .db - .raw_get_pinned_cf(self.prefix, &self.key) - .map_err(|_| StoreError::CFNotExist(self.prefix.to_string()))? + .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? { - let item: T = bincode::deserialize(&slice)?; + let item = S::Value::decode_value(&slice)?; *self.cached_item.write() = Some(item.clone()); Ok(item) } else { Err(StoreError::KeyNotFound( - String::from_utf8(self.key.clone()) + String::from_utf8(self.key.encode_key()?) .unwrap_or(("unrecoverable key string").to_string()), )) } } - pub fn write(&mut self, mut writer: impl DbWriter, item: &T) -> Result<(), StoreError> - where - T: Clone + Serialize, - { + pub fn write(&mut self, mut writer: impl DbWriter, item: &S::Value) -> Result<(), StoreError> { *self.cached_item.write() = Some(item.clone()); - let bin_data = bincode::serialize(item)?; - writer.put(self.prefix, &self.key, bin_data)?; + writer.put::(&self.key, item)?; Ok(()) } pub fn remove(&mut self, mut writer: impl DbWriter) -> Result<(), StoreError> where { *self.cached_item.write() = None; - writer.delete(self.prefix, &self.key)?; + writer.delete::(&self.key)?; Ok(()) } - pub fn update(&mut self, mut writer: impl DbWriter, op: F) -> Result + pub fn update(&mut self, mut writer: impl DbWriter, op: F) -> Result where - T: Clone + Serialize + DeserializeOwned, - F: Fn(T) -> T, + F: Fn(S::Value) -> S::Value, { let mut guard = self.cached_item.write(); let mut item = if let Some(item) = guard.take() { item } else if let Some(slice) = self .db - .raw_get_pinned_cf(self.prefix, &self.key) - .map_err(|_| StoreError::CFNotExist(self.prefix.to_string()))? + .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? 
{ - let item: T = bincode::deserialize(&slice)?; + let item = S::Value::decode_value(&slice)?; item } else { - return Err(StoreError::KeyNotFound( - String::from_utf8(self.key.clone()) - .unwrap_or(("unrecoverable key string").to_string()), - )); + return Err(StoreError::KeyNotFound("".to_string())); }; item = op(item); // Apply the update op *guard = Some(item.clone()); - let bin_data = bincode::serialize(&item)?; - writer.put(self.prefix, &self.key, bin_data)?; + writer.put::(&self.key, &item)?; Ok(item) } } diff --git a/consensus/src/consensusdb/mod.rs b/consensus/src/consensusdb/mod.rs index f15f665a74..35d6495ade 100644 --- a/consensus/src/consensusdb/mod.rs +++ b/consensus/src/consensusdb/mod.rs @@ -5,24 +5,25 @@ mod consensus_header; mod consensus_reachability; mod consensus_relations; mod db; -mod errors; +mod error; mod item; +pub mod schema; mod writer; pub mod prelude { - use super::{db, errors}; + use super::{db, error}; pub use super::{ access::CachedDbAccess, - cache::Cache, + cache::DagCache, item::CachedDbItem, writer::{BatchDbWriter, DbWriter, DirectDbWriter}, }; pub use db::{FlexiDagStorage, FlexiDagStorageConfig}; - pub use errors::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; + pub use error::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; } -pub mod schema { +pub mod schemadb { pub use super::{ consensus_ghostdag::*, consensus_header::*, consensus_reachability::*, consensus_relations::*, diff --git a/consensus/src/consensusdb/schema.rs b/consensus/src/consensusdb/schema.rs new file mode 100644 index 0000000000..ad1bbc072f --- /dev/null +++ b/consensus/src/consensusdb/schema.rs @@ -0,0 +1,40 @@ +use super::error::StoreError; +use core::hash::Hash; +use std::fmt::Debug; +use std::result::Result; + +pub trait KeyCodec: Clone + Sized + Debug + Send + Sync { + /// Converts `self` to bytes to be stored in DB. + fn encode_key(&self) -> Result, StoreError>; + /// Converts bytes fetched from DB to `Self`. + fn decode_key(data: &[u8]) -> Result; +} + +pub trait ValueCodec: Clone + Sized + Debug + Send + Sync { + /// Converts `self` to bytes to be stored in DB. + fn encode_value(&self) -> Result, StoreError>; + /// Converts bytes fetched from DB to `Self`. + fn decode_value(data: &[u8]) -> Result; +} + +pub trait Schema: Debug + Send + Sync + 'static { + const COLUMN_FAMILY: &'static str; + + type Key: KeyCodec + Hash + Eq + Default; + type Value: ValueCodec + Default + Clone; +} + +#[macro_export] +macro_rules! 
define_schema { + ($schema_type: ident, $key_type: ty, $value_type: ty, $cf_name: expr) => { + #[derive(Clone, Debug)] + pub(crate) struct $schema_type; + + impl $crate::schema::Schema for $schema_type { + type Key = $key_type; + type Value = $value_type; + + const COLUMN_FAMILY: &'static str = $cf_name; + } + }; +} diff --git a/consensus/src/consensusdb/writer.rs b/consensus/src/consensusdb/writer.rs index 0d692c2859..717d7d7e1c 100644 --- a/consensus/src/consensusdb/writer.rs +++ b/consensus/src/consensusdb/writer.rs @@ -1,12 +1,13 @@ use rocksdb::WriteBatch; use starcoin_storage::storage::InnerStore; -use super::{db::DBStorage, errors::StoreError}; +use super::schema::{KeyCodec, Schema, ValueCodec}; +use super::{db::DBStorage, error::StoreError}; /// Abstraction over direct/batched DB writing pub trait DbWriter { - fn put(&mut self, cf_name: &str, key: &[u8], value: Vec) -> Result<(), StoreError>; - fn delete(&mut self, cf_name: &str, key: &[u8]) -> Result<(), StoreError>; + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError>; + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError>; } pub struct DirectDbWriter<'a> { @@ -20,15 +21,18 @@ impl<'a> DirectDbWriter<'a> { } impl DbWriter for DirectDbWriter<'_> { - fn put(&mut self, cf_name: &str, key: &[u8], value: Vec) -> Result<(), StoreError> { + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + let bin_key = key.encode_key()?; + let bin_data = value.encode_value()?; self.db - .put(cf_name, key.to_owned(), value) + .put(S::COLUMN_FAMILY, bin_key, bin_data) .map_err(|e| StoreError::DBIoError(e.to_string())) } - fn delete(&mut self, cf_name: &str, key: &[u8]) -> Result<(), StoreError> { + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + let key = key.encode_key()?; self.db - .remove(cf_name, key.to_owned()) + .remove(S::COLUMN_FAMILY, key) .map_err(|e| StoreError::DBIoError(e.to_string())) } } @@ -44,12 +48,15 @@ impl<'a> BatchDbWriter<'a> { } impl DbWriter for BatchDbWriter<'_> { - fn put(&mut self, _cf_name: &str, key: &[u8], value: Vec) -> Result<(), StoreError> { + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + let key = key.encode_key()?; + let value = value.encode_value()?; self.batch.put(key, value); Ok(()) } - fn delete(&mut self, _cf_name: &str, key: &[u8]) -> Result<(), StoreError> { + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + let key = key.encode_key()?; self.batch.delete(key); Ok(()) } @@ -57,12 +64,12 @@ impl DbWriter for BatchDbWriter<'_> { impl DbWriter for &mut T { #[inline] - fn put(&mut self, cf_name: &str, key: &[u8], value: Vec) -> Result<(), StoreError> { - (*self).put(cf_name, key, value) + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + (*self).put::(key, value) } #[inline] - fn delete(&mut self, cf_name: &str, key: &[u8]) -> Result<(), StoreError> { - (*self).delete(cf_name, key) + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + (*self).delete::(key) } } diff --git a/consensus/src/dag/blockdag.rs b/consensus/src/dag/blockdag.rs index 5b8c359765..a981b5b1dc 100644 --- a/consensus/src/dag/blockdag.rs +++ b/consensus/src/dag/blockdag.rs @@ -2,7 +2,7 @@ use super::ghostdag::protocol::GhostdagManager; use super::reachability::{inquirer, reachability_service::MTReachabilityService}; use crate::consensusdb::{ prelude::FlexiDagStorage, - schema::{ + schemadb::{ DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, 
HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, }, diff --git a/consensus/src/dag/ghostdag/mergeset.rs b/consensus/src/dag/ghostdag/mergeset.rs index a674cd5a65..79aefe2db7 100644 --- a/consensus/src/dag/ghostdag/mergeset.rs +++ b/consensus/src/dag/ghostdag/mergeset.rs @@ -1,5 +1,5 @@ use super::protocol::GhostdagManager; -use crate::consensusdb::schema::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; use crate::dag::reachability::reachability_service::ReachabilityService; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::BlockHashSet; diff --git a/consensus/src/dag/ghostdag/protocol.rs b/consensus/src/dag/ghostdag/protocol.rs index 812b245cd7..3a12740e28 100644 --- a/consensus/src/dag/ghostdag/protocol.rs +++ b/consensus/src/dag/ghostdag/protocol.rs @@ -1,5 +1,5 @@ use super::util::Refs; -use crate::consensusdb::schema::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; use crate::dag::reachability::reachability_service::ReachabilityService; use crate::dag::types::{ghostdata::GhostdagData, ordering::*}; use starcoin_crypto::HashValue as Hash; @@ -131,8 +131,12 @@ impl< } } - let blue_score = self.ghostdag_store.get_blue_score(selected_parent).unwrap() - + new_block_data.mergeset_blues.len() as u64; + let blue_score = self + .ghostdag_store + .get_blue_score(selected_parent) + .unwrap() + .checked_add(new_block_data.mergeset_blues.len() as u64) + .unwrap(); let added_blue_work: BlueWorkType = new_block_data .mergeset_blues @@ -149,8 +153,12 @@ impl< }) .sum(); - let blue_work = - self.ghostdag_store.get_blue_work(selected_parent).unwrap() + added_blue_work; + let blue_work = self + .ghostdag_store + .get_blue_work(selected_parent) + .unwrap() + .checked_add(added_blue_work) + .unwrap(); new_block_data.finalize_score_and_work(blue_score, blue_work); new_block_data @@ -195,7 +203,7 @@ impl< candidate_blues_anticone_sizes .insert(block, self.blue_anticone_size(block, new_block_data)); - *candidate_blue_anticone_size += 1; + *candidate_blue_anticone_size = (*candidate_blue_anticone_size).checked_add(1).unwrap(); if *candidate_blue_anticone_size > self.k { // k-cluster violation: The candidate's blue anticone exceeded k return ColoringState::Red; @@ -252,7 +260,7 @@ impl< ) -> ColoringOutput { // The maximum length of new_block_data.mergeset_blues can be K+1 because // it contains the selected parent. 
- if new_block_data.mergeset_blues.len() as KType == self.k + 1 { + if new_block_data.mergeset_blues.len() as KType == self.k.checked_add(1).unwrap() { return ColoringOutput::Red; } diff --git a/consensus/src/dag/reachability/extensions.rs b/consensus/src/dag/reachability/extensions.rs index 497fe2c4a5..9ea769fb9a 100644 --- a/consensus/src/dag/reachability/extensions.rs +++ b/consensus/src/dag/reachability/extensions.rs @@ -1,4 +1,4 @@ -use crate::consensusdb::{prelude::StoreResult, schema::ReachabilityStoreReader}; +use crate::consensusdb::{prelude::StoreResult, schemadb::ReachabilityStoreReader}; use crate::dag::types::interval::Interval; use starcoin_crypto::hash::HashValue as Hash; @@ -23,7 +23,10 @@ impl ReachabilityStoreIntervalExtensions fo match self.get_children(block)?.first() { Some(first_child) => { let first_alloc = self.get_interval(*first_child)?; - Ok(Interval::new(alloc_capacity.start, first_alloc.start - 1)) + Ok(Interval::new( + alloc_capacity.start, + first_alloc.start.checked_sub(1).unwrap(), + )) } None => Ok(alloc_capacity), } @@ -36,7 +39,10 @@ impl ReachabilityStoreIntervalExtensions fo match self.get_children(block)?.last() { Some(last_child) => { let last_alloc = self.get_interval(*last_child)?; - Ok(Interval::new(last_alloc.end + 1, alloc_capacity.end)) + Ok(Interval::new( + last_alloc.end.checked_add(1).unwrap(), + alloc_capacity.end, + )) } None => Ok(alloc_capacity), } diff --git a/consensus/src/dag/reachability/inquirer.rs b/consensus/src/dag/reachability/inquirer.rs index 2638989b08..022a71074b 100644 --- a/consensus/src/dag/reachability/inquirer.rs +++ b/consensus/src/dag/reachability/inquirer.rs @@ -1,5 +1,5 @@ use super::{tree::*, *}; -use crate::consensusdb::schema::{ReachabilityStore, ReachabilityStoreReader}; +use crate::consensusdb::schemadb::{ReachabilityStore, ReachabilityStoreReader}; use crate::dag::types::{interval::Interval, perf}; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash; @@ -217,8 +217,17 @@ fn binary_search_descendant( Err(i) => { // `i` is where `point` was expected (i.e., point < ordered_hashes[i].interval.start), // so we expect `ordered_hashes[i - 1].interval` to be the only candidate to contain `point` - if i > 0 && is_chain_ancestor_of(store, ordered_hashes[i - 1], descendant)? { - Ok(SearchOutput::Found(ordered_hashes[i - 1], i - 1)) + if i > 0 + && is_chain_ancestor_of( + store, + ordered_hashes[i.checked_sub(1).unwrap()], + descendant, + )? 
+ { + Ok(SearchOutput::Found( + ordered_hashes[i.checked_sub(1).unwrap()], + i.checked_sub(1).unwrap(), + )) } else { Ok(SearchOutput::NotFound(i)) } @@ -241,7 +250,7 @@ fn assert_hashes_ordered(store: &(impl ReachabilityStoreReader + ?Sized), ordere #[cfg(test)] mod tests { use super::{super::tests::*, *}; - use crate::consensusdb::schema::MemoryReachabilityStore; + use crate::consensusdb::schemadb::MemoryReachabilityStore; use starcoin_types::blockhash::ORIGIN; #[test] diff --git a/consensus/src/dag/reachability/reachability_service.rs b/consensus/src/dag/reachability/reachability_service.rs index 20e8edeeed..6b2fa643a7 100644 --- a/consensus/src/dag/reachability/reachability_service.rs +++ b/consensus/src/dag/reachability/reachability_service.rs @@ -1,5 +1,5 @@ use super::{inquirer, Result}; -use crate::consensusdb::schema::ReachabilityStoreReader; +use crate::consensusdb::schemadb::ReachabilityStoreReader; use parking_lot::RwLock; use starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_types::blockhash; @@ -228,7 +228,7 @@ impl Iterator for ForwardChainIterator { #[cfg(test)] mod tests { use super::*; - use crate::consensusdb::schema::MemoryReachabilityStore; + use crate::consensusdb::schemadb::MemoryReachabilityStore; use crate::dag::{reachability::tests::TreeBuilder, types::interval::Interval}; #[test] diff --git a/consensus/src/dag/reachability/reindex.rs b/consensus/src/dag/reachability/reindex.rs index c05c639d98..48895b602a 100644 --- a/consensus/src/dag/reachability/reindex.rs +++ b/consensus/src/dag/reachability/reindex.rs @@ -1,7 +1,7 @@ use super::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, }; -use crate::consensusdb::schema::ReachabilityStore; +use crate::consensusdb::schemadb::ReachabilityStore; use crate::dag::types::interval::Interval; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap}; @@ -147,7 +147,7 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { let count = counts.entry(current).or_insert(0); let children = self.store.get_children(current)?; - *count += 1; + *count = (*count).checked_add(1).unwrap(); if *count < children.len() as u64 { // Not all subtrees of the current block are ready break; @@ -157,7 +157,8 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { // Sum them all together and add 1 to get the sub tree size of // `current`. let subtree_sum: u64 = children.iter().map(|c| self.subtree_sizes[c]).sum(); - self.subtree_sizes.insert(current, subtree_sum + 1); + self.subtree_sizes + .insert(current, subtree_sum.checked_add(1).unwrap()); } } @@ -244,7 +245,11 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { if current == reindex_root { // Reached reindex root. 
In this case, since we reached (the unlimited) root, // we also re-allocate new slack for the chain we just traversed - let offset = required_allocation + self.slack * path_len - slack_sum; + let offset = required_allocation + .checked_add(self.slack.checked_mul(path_len).unwrap()) + .unwrap() + .checked_sub(slack_sum) + .unwrap(); self.apply_interval_op_and_propagate(current, offset, Interval::increase_start)?; self.offset_siblings_before(allocation_block, current, offset)?; @@ -254,11 +259,13 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { } let slack_before_current = self.store.interval_remaining_before(current)?.size(); - slack_sum += slack_before_current; + slack_sum = slack_sum.checked_add(slack_before_current).unwrap(); if slack_sum >= required_allocation { // Set offset to be just enough to satisfy required allocation - let offset = slack_before_current - (slack_sum - required_allocation); + let offset = slack_before_current + .checked_sub(slack_sum.checked_sub(required_allocation).unwrap()) + .unwrap(); self.apply_interval_op(current, offset, Interval::increase_start)?; self.offset_siblings_before(allocation_block, current, offset)?; @@ -266,7 +273,7 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { } current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; - path_len += 1; + path_len = path_len.checked_add(1).unwrap(); } // Go back down the reachability tree towards the common ancestor. @@ -280,7 +287,7 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { } let slack_before_current = self.store.interval_remaining_before(current)?.size(); - let offset = slack_before_current - path_slack_alloc; + let offset = slack_before_current.checked_sub(path_slack_alloc).unwrap(); self.apply_interval_op(current, offset, Interval::increase_start)?; self.offset_siblings_before(allocation_block, current, offset)?; } @@ -306,7 +313,11 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { if current == reindex_root { // Reached reindex root. 
In this case, since we reached (the unlimited) root, // we also re-allocate new slack for the chain we just traversed - let offset = required_allocation + self.slack * path_len - slack_sum; + let offset = required_allocation + .checked_add(self.slack.checked_mul(path_len).unwrap()) + .unwrap() + .checked_sub(slack_sum) + .unwrap(); self.apply_interval_op_and_propagate(current, offset, Interval::decrease_end)?; self.offset_siblings_after(allocation_block, current, offset)?; @@ -316,11 +327,13 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { } let slack_after_current = self.store.interval_remaining_after(current)?.size(); - slack_sum += slack_after_current; + slack_sum = slack_sum.checked_add(slack_after_current).unwrap(); if slack_sum >= required_allocation { // Set offset to be just enough to satisfy required allocation - let offset = slack_after_current - (slack_sum - required_allocation); + let offset = slack_after_current + .checked_sub(slack_sum.checked_sub(required_allocation).unwrap()) + .unwrap(); self.apply_interval_op(current, offset, Interval::decrease_end)?; self.offset_siblings_after(allocation_block, current, offset)?; @@ -328,7 +341,7 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { } current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; - path_len += 1; + path_len = path_len.checked_add(1).unwrap(); } // Go back down the reachability tree towards the common ancestor. @@ -342,7 +355,7 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { } let slack_after_current = self.store.interval_remaining_after(current)?.size(); - let offset = slack_after_current - path_slack_alloc; + let offset = slack_after_current.checked_sub(path_slack_alloc).unwrap(); self.apply_interval_op(current, offset, Interval::decrease_end)?; self.offset_siblings_after(allocation_block, current, offset)?; } @@ -472,8 +485,15 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { let interval = self.store.get_interval(parent)?; let interval_before = Interval::new( - interval.start + self.slack, - interval.start + self.slack + sum - 1, + interval.start.checked_add(self.slack).unwrap(), + interval + .start + .checked_add(self.slack) + .unwrap() + .checked_add(sum) + .unwrap() + .checked_sub(1) + .unwrap(), ); for (c, ci) in children_before @@ -505,8 +525,18 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { let interval = self.store.get_interval(parent)?; let interval_after = Interval::new( - interval.end - self.slack - sum, - interval.end - self.slack - 1, + interval + .end + .checked_sub(self.slack) + .unwrap() + .checked_sub(sum) + .unwrap(), + interval + .end + .checked_sub(self.slack) + .unwrap() + .checked_sub(1) + .unwrap(), ); for (c, ci) in children_after @@ -531,8 +561,20 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { ) -> Result<()> { let interval = self.store.get_interval(parent)?; let allocation = Interval::new( - interval.start + siblings_before_subtrees_sum + self.slack, - interval.end - siblings_after_subtrees_sum - self.slack - 1, + interval + .start + .checked_add(siblings_before_subtrees_sum) + .unwrap() + .checked_add(self.slack) + .unwrap(), + interval + .end + .checked_sub(siblings_after_subtrees_sum) + .unwrap() + .checked_sub(self.slack) + .unwrap() + .checked_sub(1) + .unwrap(), ); let current = self.store.get_interval(child)?; @@ -546,8 +588,10 @@ impl<'a, T: ReachabilityStore + ?Sized> 
ReindexOperationContext<'a, T> { next time this method is called (next time the reindex root moves), `allocation` is likely to contain `current`. Note that below following the propagation we reassign the full `allocation` to `child`. */ - let narrowed = - Interval::new(allocation.start + self.slack, allocation.end - self.slack); + let narrowed = Interval::new( + allocation.start.checked_add(self.slack).unwrap(), + allocation.end.checked_sub(self.slack).unwrap(), + ); self.store.set_interval(child, narrowed)?; self.propagate_interval(child)?; } @@ -560,7 +604,10 @@ impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { /// Splits `children` into two slices: the blocks that are before `pivot` and the blocks that are after. fn split_children(children: &std::sync::Arc>, pivot: Hash) -> Result<(&[Hash], &[Hash])> { if let Some(index) = children.iter().cloned().position(|c| c == pivot) { - Ok((&children[..index], &children[index + 1..])) + Ok(( + &children[..index], + &children[index.checked_add(1).unwrap()..], + )) } else { Err(ReachabilityError::DataInconsistency) } @@ -569,7 +616,7 @@ fn split_children(children: &std::sync::Arc>, pivot: Hash) -> Result<( #[cfg(test)] mod tests { use super::{super::tests::*, *}; - use crate::consensusdb::schema::{MemoryReachabilityStore, ReachabilityStoreReader}; + use crate::consensusdb::schemadb::{MemoryReachabilityStore, ReachabilityStoreReader}; use crate::dag::types::interval::Interval; use starcoin_types::blockhash; diff --git a/consensus/src/dag/reachability/relations_service.rs b/consensus/src/dag/reachability/relations_service.rs index 848391d2ee..755cfb49be 100644 --- a/consensus/src/dag/reachability/relations_service.rs +++ b/consensus/src/dag/reachability/relations_service.rs @@ -1,4 +1,4 @@ -use crate::consensusdb::{prelude::StoreError, schema::RelationsStoreReader}; +use crate::consensusdb::{prelude::StoreError, schemadb::RelationsStoreReader}; use parking_lot::RwLock; use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::BlockHashes; diff --git a/consensus/src/dag/reachability/tests.rs b/consensus/src/dag/reachability/tests.rs index 658cca74b5..e9fa593c86 100644 --- a/consensus/src/dag/reachability/tests.rs +++ b/consensus/src/dag/reachability/tests.rs @@ -4,7 +4,7 @@ use super::{inquirer::*, tree::*}; use crate::consensusdb::{ prelude::StoreError, - schema::{ReachabilityStore, ReachabilityStoreReader}, + schemadb::{ReachabilityStore, ReachabilityStoreReader}, }; use crate::dag::types::{interval::Interval, perf}; use starcoin_crypto::HashValue as Hash; diff --git a/consensus/src/dag/reachability/tree.rs b/consensus/src/dag/reachability/tree.rs index cc8357dc18..a0d98a9b23 100644 --- a/consensus/src/dag/reachability/tree.rs +++ b/consensus/src/dag/reachability/tree.rs @@ -5,7 +5,7 @@ use super::{ extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext, *, }; -use crate::consensusdb::schema::ReachabilityStore; +use crate::consensusdb::schemadb::ReachabilityStore; use starcoin_crypto::HashValue as Hash; /// Adds `new_block` as a child of `parent` in the tree structure. If this block @@ -26,7 +26,12 @@ pub fn add_tree_block( // Init with the empty interval. 
// Note: internal logic relies on interval being this specific interval // which comes exactly at the end of current capacity - store.insert(new_block, parent, remaining, parent_height + 1)?; + store.insert( + new_block, + parent, + remaining, + parent_height.checked_add(1).unwrap(), + )?; // Start a reindex operation (TODO: add timing) let reindex_root = store.get_reindex_root()?; @@ -34,7 +39,12 @@ pub fn add_tree_block( ctx.reindex_intervals(new_block, reindex_root)?; } else { let allocated = remaining.split_half().0; - store.insert(new_block, parent, allocated, parent_height + 1)?; + store.insert( + new_block, + parent, + allocated, + parent_height.checked_add(1).unwrap(), + )?; }; Ok(()) } @@ -81,7 +91,9 @@ pub fn find_next_reindex_root( // // Note: In some cases the height of the (hint) selected tip can be lower than the current reindex root height. // If that's the case we keep the reindex root unchanged. - if hint_height < current_height || hint_height - current_height < reindex_slack { + if hint_height < current_height + || hint_height.checked_sub(current_height).unwrap() < reindex_slack + { return Ok((current, current)); } @@ -99,7 +111,7 @@ pub fn find_next_reindex_root( if hint_height < child_height { return Err(ReachabilityError::DataInconsistency); } - if hint_height - child_height < reindex_depth { + if hint_height.checked_sub(child_height).unwrap() < reindex_depth { break; } next = child; diff --git a/consensus/src/dag/types/ghostdata.rs b/consensus/src/dag/types/ghostdata.rs index d11f630827..c680172148 100644 --- a/consensus/src/dag/types/ghostdata.rs +++ b/consensus/src/dag/types/ghostdata.rs @@ -14,7 +14,7 @@ pub struct GhostdagData { pub blues_anticone_sizes: HashKTypeMap, } -#[derive(Clone, Serialize, Deserialize, Copy)] +#[derive(Clone, Debug, Default, Serialize, Deserialize, Copy)] pub struct CompactGhostdagData { pub blue_score: u64, pub blue_work: BlueWorkType, @@ -67,7 +67,7 @@ impl GhostdagData { } pub fn new_with_selected_parent(selected_parent: Hash, k: KType) -> Self { - let mut mergeset_blues: Vec = Vec::with_capacity((k + 1) as usize); + let mut mergeset_blues: Vec = Vec::with_capacity(k.checked_add(1).unwrap() as usize); let mut blues_anticone_sizes: BlockHashMap = BlockHashMap::with_capacity(k as usize); mergeset_blues.push(selected_parent); blues_anticone_sizes.insert(selected_parent, 0); @@ -83,7 +83,10 @@ impl GhostdagData { } pub fn mergeset_size(&self) -> usize { - self.mergeset_blues.len() + self.mergeset_reds.len() + self.mergeset_blues + .len() + .checked_add(self.mergeset_reds.len()) + .unwrap() } /// Returns an iterator to the mergeset with no specified order (excluding the selected parent) @@ -128,7 +131,7 @@ impl GhostdagData { // Insert/update map entries for blocks affected by this insertion for (blue, size) in block_blues_anticone_sizes { - blues_anticone_sizes.insert(*blue, size + 1); + blues_anticone_sizes.insert(*blue, size.checked_add(1).unwrap()); } } diff --git a/consensus/src/dag/types/interval.rs b/consensus/src/dag/types/interval.rs index 6b09f68b31..0b5cc4f6e5 100644 --- a/consensus/src/dag/types/interval.rs +++ b/consensus/src/dag/types/interval.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; -#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] pub struct Interval { pub start: u64, pub end: u64, @@ -21,9 +21,7 @@ impl From for (u64, u64) { impl Interval { pub fn new(start: u64, end: u64) -> 
Self { - debug_assert!(end >= start - 1); // TODO: make sure this is actually debug-only - debug_assert!(start > 0); - debug_assert!(end < u64::MAX); + debug_assert!(start > 0 && end < u64::MAX && end >= start.checked_sub(1).unwrap()); // TODO: make sure this is actually debug-only Interval { start, end } } @@ -35,14 +33,16 @@ impl Interval { /// both `u64` bounds (`0` and `u64::MAX`) in order to support the reduction of any /// legal interval to an empty one by setting `end = start - 1` or `start = end + 1` pub fn maximal() -> Self { - Self::new(1, u64::MAX - 1) + Self::new(1, u64::MAX.saturating_sub(1)) } pub fn size(&self) -> u64 { // Empty intervals are indicated by `self.end == self.start - 1`, so // we avoid the overflow by first adding 1 // Note: this function will panic if `self.end < self.start - 1` due to overflow - (self.end + 1) - self.start + (self.end.checked_add(1).unwrap()) + .checked_sub(self.start) + .unwrap() } pub fn is_empty(&self) -> bool { @@ -50,27 +50,33 @@ impl Interval { } pub fn increase(&self, offset: u64) -> Self { - Self::new(self.start + offset, self.end + offset) + Self::new( + self.start.checked_add(offset).unwrap(), + self.end.checked_add(offset).unwrap(), + ) } pub fn decrease(&self, offset: u64) -> Self { - Self::new(self.start - offset, self.end - offset) + Self::new( + self.start.checked_sub(offset).unwrap(), + self.end.checked_sub(offset).unwrap(), + ) } pub fn increase_start(&self, offset: u64) -> Self { - Self::new(self.start + offset, self.end) + Self::new(self.start.checked_add(offset).unwrap(), self.end) } pub fn decrease_start(&self, offset: u64) -> Self { - Self::new(self.start - offset, self.end) + Self::new(self.start.checked_sub(offset).unwrap(), self.end) } pub fn increase_end(&self, offset: u64) -> Self { - Self::new(self.start, self.end + offset) + Self::new(self.start, self.end.checked_add(offset).unwrap()) } pub fn decrease_end(&self, offset: u64) -> Self { - Self::new(self.start, self.end - offset) + Self::new(self.start, self.end.checked_sub(offset).unwrap()) } pub fn split_half(&self) -> (Self, Self) { @@ -86,8 +92,15 @@ impl Interval { let left_size = f32::ceil(self.size() as f32 * fraction) as u64; ( - Self::new(self.start, self.start + left_size - 1), - Self::new(self.start + left_size, self.end), + Self::new( + self.start, + self.start + .checked_add(left_size) + .unwrap() + .checked_sub(1) + .unwrap(), + ), + Self::new(self.start.checked_add(left_size).unwrap(), self.end), ) } @@ -104,8 +117,11 @@ impl Interval { sizes .iter() .map(|size| { - let interval = Self::new(start, start + size - 1); - start += size; + let interval = Self::new( + start, + start.checked_add(*size).unwrap().checked_sub(1).unwrap(), + ); + start = start.checked_add(*size).unwrap(); interval }) .collect() @@ -138,19 +154,19 @@ impl Interval { // Add a fractional bias to every size in the provided sizes // - let mut remaining_bias = interval_size - sizes_sum; + let mut remaining_bias = interval_size.checked_sub(sizes_sum).unwrap(); let total_bias = remaining_bias as f64; let mut biased_sizes = Vec::::with_capacity(sizes.len()); let exp_fractions = exponential_fractions(sizes); for (i, fraction) in exp_fractions.iter().enumerate() { - let bias: u64 = if i == exp_fractions.len() - 1 { + let bias: u64 = if i == exp_fractions.len().checked_sub(1).unwrap() { remaining_bias } else { remaining_bias.min(f64::round(total_bias * fraction) as u64) }; - biased_sizes.push(sizes[i] + bias); - remaining_bias -= bias; + 
biased_sizes.push(sizes[i].checked_add(bias).unwrap()); + remaining_bias = remaining_bias.checked_sub(bias).unwrap(); } self.split_exact(biased_sizes.as_slice()) diff --git a/consensus/src/dag/types/reachability.rs b/consensus/src/dag/types/reachability.rs index 62c84c3d6e..35dc3979b6 100644 --- a/consensus/src/dag/types/reachability.rs +++ b/consensus/src/dag/types/reachability.rs @@ -4,7 +4,7 @@ use starcoin_crypto::HashValue as Hash; use starcoin_types::blockhash::BlockHashes; use std::sync::Arc; -#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone, Default, Debug, Serialize, Deserialize)] pub struct ReachabilityData { pub children: BlockHashes, pub parent: Hash, diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 731aa6e235..6c8e7eca1d 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -31,6 +31,7 @@ pub mod keccak; pub use consensus::{Consensus, ConsensusVerifyError}; pub use consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; +pub(crate) use consensusdb::schema; pub use dag::blockdag::BlockDAG; pub use starcoin_time_service::duration_since_epoch; diff --git a/storage/src/batch/mod.rs b/storage/src/batch/mod.rs index 60e463274e..562ed71ae1 100644 --- a/storage/src/batch/mod.rs +++ b/storage/src/batch/mod.rs @@ -5,29 +5,31 @@ use crate::storage::{CodecWriteBatch, KeyCodec, ValueCodec, WriteOp}; use anyhow::Result; use std::convert::TryFrom; +pub type WriteBatch = GWriteBatch, Vec>; + #[derive(Debug, Default, Clone)] -pub struct WriteBatch { - pub rows: Vec<(Vec, WriteOp>)>, +pub struct GWriteBatch { + pub rows: Vec<(K, WriteOp)>, } -impl WriteBatch { +impl GWriteBatch { /// Creates an empty batch. pub fn new() -> Self { Self::default() } - pub fn new_with_rows(rows: Vec<(Vec, WriteOp>)>) -> Self { + pub fn new_with_rows(rows: Vec<(K, WriteOp)>) -> Self { Self { rows } } /// Adds an insert/update operation to the batch. - pub fn put(&mut self, key: Vec, value: Vec) -> Result<()> { + pub fn put(&mut self, key: K, value: V) -> Result<()> { self.rows.push((key, WriteOp::Value(value))); Ok(()) } /// Adds a delete operation to the batch. 
- pub fn delete(&mut self, key: Vec) -> Result<()> { + pub fn delete(&mut self, key: K) -> Result<()> { self.rows.push((key, WriteOp::Deletion)); Ok(()) } diff --git a/storage/src/cache_storage/mod.rs b/storage/src/cache_storage/mod.rs index c6e7807988..596fbd181d 100644 --- a/storage/src/cache_storage/mod.rs +++ b/storage/src/cache_storage/mod.rs @@ -1,31 +1,35 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 +use crate::batch::GWriteBatch; use crate::{ batch::WriteBatch, metrics::{record_metrics, StorageMetrics}, storage::{InnerStore, WriteOp}, }; use anyhow::{Error, Result}; +use core::hash::Hash; use lru::LruCache; use parking_lot::Mutex; use starcoin_config::DEFAULT_CACHE_SIZE; -pub struct CacheStorage { - cache: Mutex, Vec>>, +pub type CacheStorage = GCacheStorage, Vec>; + +pub struct GCacheStorage { + cache: Mutex>, metrics: Option, } -impl CacheStorage { +impl GCacheStorage { pub fn new(metrics: Option) -> Self { - CacheStorage { - cache: Mutex::new(LruCache::new(DEFAULT_CACHE_SIZE)), + GCacheStorage { + cache: Mutex::new(LruCache::::new(DEFAULT_CACHE_SIZE)), metrics, } } pub fn new_with_capacity(size: usize, metrics: Option) -> Self { - CacheStorage { - cache: Mutex::new(LruCache::new(size)), + GCacheStorage { + cache: Mutex::new(LruCache::::new(size)), metrics, } } @@ -34,7 +38,7 @@ impl CacheStorage { } } -impl Default for CacheStorage { +impl Default for GCacheStorage { fn default() -> Self { Self::new(None) } @@ -42,14 +46,16 @@ impl Default for CacheStorage { impl InnerStore for CacheStorage { fn get(&self, prefix_name: &str, key: Vec) -> Result>> { + let composed_key = compose_key(Some(prefix_name), key); record_metrics("cache", prefix_name, "get", self.metrics.as_ref()) - .call(|| Ok(self.get_inner(Some(prefix_name), key))) + .call(|| Ok(self.get_inner(&composed_key))) } fn put(&self, prefix_name: &str, key: Vec, value: Vec) -> Result<()> { // remove record_metrics for performance // record_metrics add in write_batch to reduce Instant::now system call - let len = self.put_inner(Some(prefix_name), key, value); + let composed_key = compose_key(Some(prefix_name), key); + let len = self.put_inner(composed_key, value); if let Some(metrics) = self.metrics.as_ref() { metrics.cache_items.set(len as u64); } @@ -57,13 +63,15 @@ impl InnerStore for CacheStorage { } fn contains_key(&self, prefix_name: &str, key: Vec) -> Result { + let composed_key = compose_key(Some(prefix_name), key); record_metrics("cache", prefix_name, "contains_key", self.metrics.as_ref()) - .call(|| Ok(self.contains_key_inner(Some(prefix_name), key))) + .call(|| Ok(self.contains_key_inner(&composed_key))) } fn remove(&self, prefix_name: &str, key: Vec) -> Result<()> { // remove record_metrics for performance // record_metrics add in write_batch to reduce Instant::now system call - let len = self.remove_inner(Some(prefix_name), key); + let composed_key = compose_key(Some(prefix_name), key); + let len = self.remove_inner(&composed_key); if let Some(metrics) = self.metrics.as_ref() { metrics.cache_items.set(len as u64); } @@ -71,8 +79,14 @@ impl InnerStore for CacheStorage { } fn write_batch(&self, prefix_name: &str, batch: WriteBatch) -> Result<()> { + let rows = batch + .rows + .into_iter() + .map(|(k, v)| (compose_key(Some(prefix_name), k), v)) + .collect(); + let batch = WriteBatch { rows }; record_metrics("cache", prefix_name, "write_batch", self.metrics.as_ref()).call(|| { - self.write_batch_inner(Some(prefix_name), batch); + self.write_batch_inner(batch); Ok(()) }) } @@ 
-98,7 +112,11 @@ impl InnerStore for CacheStorage { } fn multi_get(&self, prefix_name: &str, keys: Vec>) -> Result>>> { - Ok(self.multi_get_inner(Some(prefix_name), keys)) + let composed_keys = keys + .into_iter() + .map(|k| compose_key(Some(prefix_name), k)) + .collect::>(); + Ok(self.multi_get_inner(composed_keys.as_slice())) } } @@ -115,60 +133,53 @@ fn compose_key(prefix_name: Option<&str>, source_key: Vec) -> Vec { } } -impl CacheStorage { - pub fn get_inner(&self, prefix_name: Option<&str>, key: Vec) -> Option> { - self.cache - .lock() - .get(&compose_key(prefix_name, key)) - .cloned() +impl GCacheStorage { + pub fn get_inner(&self, key: &K) -> Option { + self.cache.lock().get(key).cloned() } - pub fn put_inner(&self, prefix_name: Option<&str>, key: Vec, value: Vec) -> usize { + pub fn put_inner(&self, key: K, value: V) -> usize { let mut cache = self.cache.lock(); - cache.put(compose_key(prefix_name, key), value); + cache.put(key, value); cache.len() } - pub fn contains_key_inner(&self, prefix_name: Option<&str>, key: Vec) -> bool { - self.cache.lock().contains(&compose_key(prefix_name, key)) + pub fn contains_key_inner(&self, key: &K) -> bool { + self.cache.lock().contains(key) } - pub fn remove_inner(&self, prefix_name: Option<&str>, key: Vec) -> usize { + pub fn remove_inner(&self, key: &K) -> usize { let mut cache = self.cache.lock(); - cache.pop(&compose_key(prefix_name, key)); + cache.pop(key); cache.len() } - pub fn write_batch_inner(&self, prefix_name: Option<&str>, batch: WriteBatch) { - for (key, write_op) in &batch.rows { + pub fn write_batch_inner(&self, batch: GWriteBatch) { + for (key, write_op) in batch.rows { match write_op { WriteOp::Value(value) => { - self.put_inner(prefix_name, key.to_vec(), value.to_vec()); + self.put_inner(key, value); } WriteOp::Deletion => { - self.remove_inner(prefix_name, key.to_vec()); + self.remove_inner(&key); } }; } } - pub fn put_sync_inner(&self, prefix_name: Option<&str>, key: Vec, value: Vec) -> usize { - self.put_inner(prefix_name, key, value) + pub fn put_sync_inner(&self, key: K, value: V) -> usize { + self.put_inner(key, value) } - pub fn write_batch_sync_inner(&self, prefix_name: Option<&str>, batch: WriteBatch) { - self.write_batch_inner(prefix_name, batch) + pub fn write_batch_sync_inner(&self, batch: GWriteBatch) { + self.write_batch_inner(batch) } - pub fn multi_get_inner( - &self, - prefix_name: Option<&str>, - keys: Vec>, - ) -> Vec>> { + pub fn multi_get_inner(&self, keys: &[K]) -> Vec> { let mut cache = self.cache.lock(); let mut result = vec![]; - for key in keys.into_iter() { - let item = cache.get(&compose_key(prefix_name, key)).cloned(); + for key in keys { + let item = cache.get(key).cloned(); result.push(item); } result diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index 6d88b855fe..f0aa54e4a2 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -164,6 +164,7 @@ impl DBUpgrade { } fn db_upgrade_v3_v4(_instance: &mut StorageInstance) -> Result<()> { + // https://github.com/facebook/rocksdb/issues/1295 Ok(()) } diff --git a/types/src/header.rs b/types/src/header.rs index 95f353411f..8c5dcb591b 100644 --- a/types/src/header.rs +++ b/types/src/header.rs @@ -12,7 +12,7 @@ pub trait ConsensusHeader { fn timestamp(&self) -> u64; } -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Header { block_header: BlockHeader, parents_hash: Vec, @@ -47,13 +47,13 @@ impl ConsensusHeader for 
Header { } } -#[derive(Clone, Serialize, Deserialize)] +#[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct HeaderWithBlockLevel { pub header: Arc<Header>
, pub block_level: BlockLevel, } -#[derive(Clone, Copy, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)] pub struct CompactHeaderData { pub timestamp: u64, pub difficulty: U256, diff --git a/types/src/startup_info.rs b/types/src/startup_info.rs index 7a4db59306..f0727442bf 100644 --- a/types/src/startup_info.rs +++ b/types/src/startup_info.rs @@ -151,7 +151,6 @@ impl ChainStatus { rand::random::(), ), ); - Self { head, info: block_info, From 3c64e6e907243badf96067e80a7338d734cf9ddc Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 31 Jul 2023 14:45:12 +0800 Subject: [PATCH 28/30] use chain state info --- network/tests/network_service_test.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/tests/network_service_test.rs b/network/tests/network_service_test.rs index 0b2ca2958a..3e229e2565 100644 --- a/network/tests/network_service_test.rs +++ b/network/tests/network_service_test.rs @@ -15,7 +15,7 @@ use starcoin_logger::prelude::*; use starcoin_network::build_network_worker; use starcoin_types::block::{AccumulatorInfo, Block, BlockBody, BlockHeader, BlockInfo}; use starcoin_types::compact_block::CompactBlock; -use starcoin_types::startup_info::{ChainInfo, ChainStatus}; +use starcoin_types::startup_info::{ChainInfo, ChainStatus, ChainStateInfo}; use starcoin_types::transaction::SignedUserTransaction; use starcoin_types::U256; use std::sync::Arc; @@ -34,7 +34,7 @@ fn build_test_network_pair() -> (NetworkComponent, NetworkComponent) { fn build_test_network_services(num: usize) -> Vec { let mut result: Vec = Vec::with_capacity(num); let mut first_addr: Option = None; - let chain_info = ChainInfo::new( + let chain_info = ChainStateInfo::new( BuiltinNetworkID::Test.chain_id(), HashValue::random(), ChainStatus::random(), From 3af937a2fd25690a63dc540d540f2d4bc438b466 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 31 Jul 2023 15:54:58 +0800 Subject: [PATCH 29/30] block info ext including block info --- chain/src/dag_chain.rs | 6 +- storage/src/flexi_dag/mod.rs | 21 +- storage/src/lib.rs | 14 +- storage/src/tests/test_dag.rs | 15 +- types/src/block.rs | 19 +- types/src/dag_block.rs | 947 ---------------------------------- types/src/lib.rs | 1 - 7 files changed, 44 insertions(+), 979 deletions(-) delete mode 100644 types/src/dag_block.rs diff --git a/chain/src/dag_chain.rs b/chain/src/dag_chain.rs index 386597b635..65d1e1c59d 100644 --- a/chain/src/dag_chain.rs +++ b/chain/src/dag_chain.rs @@ -55,7 +55,7 @@ impl DagBlockChain { // let accmulator_info = sync_flexi_dag_store.get_snapshot_storage().get(startup_info.main); let accumulator_info = match storage.query_by_hash(startup_info.main) { Ok(op_snapshot) => match op_snapshot { - Some(snapshot) => snapshot.accumulator_info, + Some(snapshot) => snapshot.dag_accumulator_info, None => bail!("failed to get sync accumulator info since it is None"), }, Err(error) => bail!("failed to get sync accumulator info: {}", error.to_string()), @@ -90,7 +90,7 @@ impl DagBlockChain { Ok(op_snapshot) => { let snapshot = op_snapshot.expect("snapshot must exist"); TargetDagAccumulatorLeaf { - accumulator_root: snapshot.accumulator_info.accumulator_root, + accumulator_root: snapshot.dag_accumulator_info.accumulator_root, leaf_index: req.accumulator_leaf_index.saturating_sub(index as u64), } } @@ -153,7 +153,7 @@ impl DagBlockChain { ); details.push(TargetDagAccumulatorLeafDetail { - accumulator_root: snapshot.accumulator_info.accumulator_root, + accumulator_root: 
snapshot.dag_accumulator_info.accumulator_root, relationship_pair, }); } diff --git a/storage/src/flexi_dag/mod.rs b/storage/src/flexi_dag/mod.rs index 6cd09959a8..f4f53c6caa 100644 --- a/storage/src/flexi_dag/mod.rs +++ b/storage/src/flexi_dag/mod.rs @@ -3,22 +3,15 @@ use std::sync::Arc; use crate::{ accumulator::{AccumulatorStorage, DagBlockAccumulatorStorage}, define_storage, - storage::{CodecKVStore, StorageInstance, ValueCodec}, + storage::{StorageInstance, ValueCodec, CodecKVStore}, SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME, }; use anyhow::Result; use bcs_ext::BCSCodec; -use serde::{Deserialize, Serialize}; -use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_crypto::HashValue; +use starcoin_types::block::BlockInfoExt; -#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] -pub struct SyncFlexiDagSnapshot { - pub child_hashes: Vec, // child nodes, to get the relationship, use dag's relationship store - pub accumulator_info: AccumulatorInfo, -} - -impl ValueCodec for SyncFlexiDagSnapshot { +impl ValueCodec for BlockInfoExt { fn encode_value(&self) -> Result> { self.encode() } @@ -31,7 +24,7 @@ impl ValueCodec for SyncFlexiDagSnapshot { define_storage!( SyncFlexiDagSnapshotStorage, HashValue, // accumulator leaf node - SyncFlexiDagSnapshot, + BlockInfoExt, SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME ); @@ -63,14 +56,14 @@ impl SyncFlexiDagStorage { self.snapshot_storage.clone() } - pub fn put_hashes(&self, key: HashValue, accumulator_info: SyncFlexiDagSnapshot) -> Result<()> { - self.snapshot_storage.put(key, accumulator_info) + pub fn put_hashes(&self, key: HashValue, block_info_ext: BlockInfoExt) -> Result<()> { + self.snapshot_storage.put(key, block_info_ext) } pub fn get_hashes_by_hash( &self, hash: HashValue, - ) -> std::result::Result, anyhow::Error> { + ) -> std::result::Result, anyhow::Error> { self.snapshot_storage.get(hash) } } diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 4633bc6110..5ae1b3f416 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -16,7 +16,7 @@ use crate::{ transaction_info::{TransactionInfoHashStorage, TransactionInfoStorage}, }; use anyhow::{bail, format_err, Error, Result}; -use flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage, SyncFlexiDagStorage}; +use flexi_dag::{SyncFlexiDagSnapshotStorage, SyncFlexiDagStorage}; use network_p2p_types::peer_id::PeerId; use num_enum::{IntoPrimitive, TryFromPrimitive}; use once_cell::sync::Lazy; @@ -26,7 +26,7 @@ use starcoin_accumulator::{ use starcoin_crypto::HashValue; use starcoin_state_store_api::{StateNode, StateNodeStore}; use starcoin_types::{ - block::{Block, BlockBody, BlockHeader, BlockInfo}, + block::{Block, BlockBody, BlockHeader, BlockInfo, BlockInfoExt}, contract_event::ContractEvent, startup_info::{ ChainInfo, ChainStateInfo, ChainStatus, DagChainStatus, SnapshotRange, StartupInfo, @@ -304,8 +304,8 @@ pub trait TransactionStore { } pub trait SyncFlexiDagStore { - fn put_hashes(&self, key: HashValue, accumulator_snapshot: SyncFlexiDagSnapshot) -> Result<()>; - fn query_by_hash(&self, key: HashValue) -> Result>; + fn put_hashes(&self, key: HashValue, accumulator_snapshot: BlockInfoExt) -> Result<()>; + fn query_by_hash(&self, key: HashValue) -> Result>; fn get_accumulator_snapshot_storage(&self) -> std::sync::Arc; } @@ -407,7 +407,7 @@ impl DagBlockStore for Storage { // let accmulator_info = sync_flexi_dag_store.get_snapshot_storage().get(startup_info.main); let accumulator_info = match self.query_by_hash(startup_info.main) { Ok(op_snapshot) => match 
op_snapshot { - Some(snapshot) => snapshot.accumulator_info, + Some(snapshot) => snapshot.dag_accumulator_info, None => bail!("failed to get sync accumulator info since it is None"), }, Err(error) => bail!("failed to get sync accumulator info: {}", error.to_string()), @@ -654,11 +654,11 @@ impl TransactionStore for Storage { } impl SyncFlexiDagStore for Storage { - fn put_hashes(&self, key: HashValue, accumulator_snapshot: SyncFlexiDagSnapshot) -> Result<()> { + fn put_hashes(&self, key: HashValue, accumulator_snapshot: BlockInfoExt) -> Result<()> { self.flexi_dag_storage.put_hashes(key, accumulator_snapshot) } - fn query_by_hash(&self, key: HashValue) -> Result> { + fn query_by_hash(&self, key: HashValue) -> Result> { self.flexi_dag_storage.get_hashes_by_hash(key) } diff --git a/storage/src/tests/test_dag.rs b/storage/src/tests/test_dag.rs index 159c905ba2..6bb37fa534 100644 --- a/storage/src/tests/test_dag.rs +++ b/storage/src/tests/test_dag.rs @@ -1,16 +1,17 @@ use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; use starcoin_config::RocksdbConfig; use starcoin_crypto::HashValue; +use starcoin_types::block::{BlockInfo, BlockInfoExt}; use crate::{ - cache_storage::CacheStorage, db_storage::DBStorage, flexi_dag::SyncFlexiDagSnapshot, + cache_storage::CacheStorage, db_storage::DBStorage, storage::StorageInstance, Storage, Store, SyncFlexiDagStore, }; use anyhow::{Ok, Result}; trait SyncFlexiDagManager { fn insert_hashes(&self, hashes: Vec) -> Result; - fn query_by_hash(&self, hash: HashValue) -> Result>; + fn query_by_hash(&self, hash: HashValue) -> Result>; fn fork(&mut self, accumulator_info: AccumulatorInfo) -> Result<()>; fn get_hash_by_position(&self, position: u64) -> Result>; fn get_accumulator_info(&self) -> AccumulatorInfo; @@ -59,15 +60,17 @@ impl SyncFlexiDagManager for SyncFlexiDagManagerImp { self.accumulator.append(&[accumulator_key])?; self.flexi_dag_storage.put_hashes( accumulator_key, - SyncFlexiDagSnapshot { + BlockInfoExt { child_hashes, - accumulator_info: self.get_accumulator_info(), + block_ext_id: accumulator_key, + block_info: BlockInfo::default(), + dag_accumulator_info: self.accumulator.get_info(), }, )?; Ok(accumulator_key) } - fn query_by_hash(&self, hash: HashValue) -> Result> { + fn query_by_hash(&self, hash: HashValue) -> Result> { self.flexi_dag_storage.query_by_hash(hash) } @@ -266,7 +269,7 @@ fn test_syn_dag_accumulator_fork() { .query_by_hash(layer3) .unwrap() .unwrap() - .accumulator_info; + .dag_accumulator_info; println!("{:?}", info); assert_eq!( diff --git a/types/src/block.rs b/types/src/block.rs index 45704fa069..a72050786e 100644 --- a/types/src/block.rs +++ b/types/src/block.rs @@ -771,10 +771,27 @@ impl Sample for Block { } } +#[derive( + Clone, Default, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema, +)] +pub struct BlockInfoExt { + /// Block ext id + pub block_ext_id: HashValue, + + #[serde(skip)] + pub block_info: BlockInfo, + + /// The chid hashes. + pub child_hashes: Vec, // child nodes, to get the relationship, use dag's relationship store + + /// The dag accumulator info. + pub dag_accumulator_info: AccumulatorInfo, +} + /// `BlockInfo` is the object we store in the storage. It consists of the /// block as well as the execution result of this block. 
#[derive( - Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema, + Clone, Default, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema, )] pub struct BlockInfo { /// Block id diff --git a/types/src/dag_block.rs b/types/src/dag_block.rs deleted file mode 100644 index bc089a92e5..0000000000 --- a/types/src/dag_block.rs +++ /dev/null @@ -1,947 +0,0 @@ -// Copyright (c) The Starcoin Core Contributors -// SPDX-License-Identifier: Apache-2.0 - -use crate::account_address::AccountAddress; -use crate::block::BlockHeaderExtra; -use crate::blockhash::ORIGIN; -use crate::genesis_config::{ChainId, ConsensusStrategy}; -use crate::language_storage::CORE_CODE_ADDRESS; -use crate::transaction::SignedUserTransaction; -use crate::U256; -use bcs_ext::Sample; -use schemars::{self, JsonSchema}; -use serde::{Deserialize, Deserializer, Serialize}; -pub use starcoin_accumulator::accumulator_info::AccumulatorInfo; -use starcoin_crypto::hash::{ACCUMULATOR_PLACEHOLDER_HASH, SPARSE_MERKLE_PLACEHOLDER_HASH}; -use starcoin_crypto::{ - hash::{CryptoHash, CryptoHasher, PlainCryptoHash}, - HashValue, -}; -use starcoin_vm_types::account_config::genesis_address; -use starcoin_vm_types::dag_block_metadata::DagBlockMetadata; -use starcoin_vm_types::transaction::authenticator::AuthenticationKey; -use std::fmt::Formatter; - -/// block timestamp allowed future times -pub const ALLOWED_FUTURE_BLOCKTIME: u64 = 30000; // 30 second; - -#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, CryptoHasher, CryptoHash, JsonSchema)] -pub struct DagBlockHeader { - #[serde(skip)] - id: Option, - /// Parent hash. - parent_hash: Vec, - /// Block timestamp. - timestamp: u64, - /// Block author. - author: AccountAddress, - /// Block author auth key. - /// this field is deprecated - author_auth_key: Option, - /// The transaction accumulator root hash after executing this block. - txn_accumulator_root: HashValue, - /// The parent block info's block accumulator root hash. - block_accumulator_root: HashValue, - /// The last transaction state_root of this block after execute. - state_root: HashValue, - /// Gas used for contracts execution. - gas_used: u64, - /// Block difficulty - #[schemars(with = "String")] - difficulty: U256, - /// hash for block body - body_hash: HashValue, - /// The chain id - chain_id: ChainId, - /// Consensus nonce field. - nonce: u32, - /// block header extra - extra: BlockHeaderExtra, -} - -impl DagBlockHeader { - pub fn new( - parent_hash: Vec, - timestamp: u64, - author: AccountAddress, - txn_accumulator_root: HashValue, - block_accumulator_root: HashValue, - state_root: HashValue, - gas_used: u64, - difficulty: U256, - body_hash: HashValue, - chain_id: ChainId, - nonce: u32, - extra: BlockHeaderExtra, - ) -> DagBlockHeader { - Self::new_with_auth_key( - parent_hash, - timestamp, - author, - None, - txn_accumulator_root, - block_accumulator_root, - state_root, - gas_used, - difficulty, - body_hash, - chain_id, - nonce, - extra, - ) - } - - // the author_auth_key field is deprecated, but keep this fn for compat with old block. 
- fn new_with_auth_key( - parent_hash: Vec, - timestamp: u64, - author: AccountAddress, - author_auth_key: Option, - txn_accumulator_root: HashValue, - block_accumulator_root: HashValue, - state_root: HashValue, - gas_used: u64, - difficulty: U256, - body_hash: HashValue, - chain_id: ChainId, - nonce: u32, - extra: BlockHeaderExtra, - ) -> DagBlockHeader { - let mut header = DagBlockHeader { - id: None, - parent_hash, - block_accumulator_root, - timestamp, - author, - author_auth_key, - txn_accumulator_root, - state_root, - gas_used, - difficulty, - nonce, - body_hash, - chain_id, - extra, - }; - header.id = Some(header.crypto_hash()); - header - } - - pub fn as_pow_header_blob(&self) -> Vec { - let mut blob = Vec::new(); - let raw_header: RawDagBlockHeader = self.to_owned().into(); - let raw_header_hash = raw_header.crypto_hash(); - let mut diff = [0u8; 32]; - raw_header.difficulty.to_big_endian(&mut diff); - let extend_and_nonce = [0u8; 12]; - blob.extend_from_slice(raw_header_hash.to_vec().as_slice()); - blob.extend_from_slice(&extend_and_nonce); - blob.extend_from_slice(&diff); - blob - } - - pub fn id(&self) -> HashValue { - self.id - .expect("DagBlockHeader id should be Some after init.") - } - - pub fn parent_hash(&self) -> Vec { - self.parent_hash.clone() - } - - pub fn timestamp(&self) -> u64 { - self.timestamp - } - - pub fn author(&self) -> AccountAddress { - self.author - } - - pub fn author_auth_key(&self) -> Option { - self.author_auth_key - } - - pub fn txn_accumulator_root(&self) -> HashValue { - self.txn_accumulator_root - } - - pub fn state_root(&self) -> HashValue { - self.state_root - } - - pub fn gas_used(&self) -> u64 { - self.gas_used - } - - pub fn nonce(&self) -> u32 { - self.nonce - } - - pub fn difficulty(&self) -> U256 { - self.difficulty - } - - pub fn block_accumulator_root(&self) -> HashValue { - self.block_accumulator_root - } - - pub fn body_hash(&self) -> HashValue { - self.body_hash - } - - pub fn chain_id(&self) -> ChainId { - self.chain_id - } - - pub fn extra(&self) -> &BlockHeaderExtra { - &self.extra - } - - pub fn is_genesis(&self) -> bool { - if self.parent_hash.len() == 1 { - return self.parent_hash[0] == HashValue::new(ORIGIN); - } - false - } - - pub fn genesis_block_header( - parent_hash: Vec, - timestamp: u64, - txn_accumulator_root: HashValue, - state_root: HashValue, - difficulty: U256, - body_hash: HashValue, - chain_id: ChainId, - ) -> Self { - Self::new( - parent_hash, - timestamp, - CORE_CODE_ADDRESS, - txn_accumulator_root, - *ACCUMULATOR_PLACEHOLDER_HASH, - state_root, - 0, - difficulty, - body_hash, - chain_id, - 0, - BlockHeaderExtra::default(), - ) - } - - pub fn random() -> Self { - Self::new( - vec![HashValue::random()], - rand::random(), - AccountAddress::random(), - HashValue::random(), - HashValue::random(), - HashValue::random(), - rand::random(), - U256::max_value(), - HashValue::random(), - ChainId::test(), - 0, - BlockHeaderExtra::new([0u8; 4]), - ) - } - - pub fn as_builder(&self) -> DagBlockHeaderBuilder { - DagBlockHeaderBuilder::new_with(self.clone()) - } -} - -impl<'de> Deserialize<'de> for DagBlockHeader { - fn deserialize(deserializer: D) -> Result>::Error> - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - #[serde(rename = "DagBlockHeader")] - struct DagBlockHeaderData { - parent_hash: Vec, - timestamp: u64, - author: AccountAddress, - author_auth_key: Option, - txn_accumulator_root: HashValue, - block_accumulator_root: HashValue, - state_root: HashValue, - gas_used: u64, - difficulty: U256, - body_hash: 
HashValue, - chain_id: ChainId, - nonce: u32, - extra: BlockHeaderExtra, - } - - let header_data = DagBlockHeaderData::deserialize(deserializer)?; - let block_header = Self::new_with_auth_key( - header_data.parent_hash, - header_data.timestamp, - header_data.author, - header_data.author_auth_key, - header_data.txn_accumulator_root, - header_data.block_accumulator_root, - header_data.state_root, - header_data.gas_used, - header_data.difficulty, - header_data.body_hash, - header_data.chain_id, - header_data.nonce, - header_data.extra, - ); - Ok(block_header) - } -} - -impl Default for DagBlockHeader { - fn default() -> Self { - Self::new( - vec![HashValue::zero()], - 0, - AccountAddress::ZERO, - HashValue::zero(), - HashValue::zero(), - HashValue::zero(), - 0, - 0.into(), - HashValue::zero(), - ChainId::test(), - 0, - BlockHeaderExtra::new([0u8; 4]), - ) - } -} - -impl Sample for DagBlockHeader { - fn sample() -> Self { - Self::new( - vec![HashValue::zero()], - 1610110515000, - genesis_address(), - *ACCUMULATOR_PLACEHOLDER_HASH, - *ACCUMULATOR_PLACEHOLDER_HASH, - *SPARSE_MERKLE_PLACEHOLDER_HASH, - 0, - U256::from(1), - BlockBody::sample().crypto_hash(), - ChainId::test(), - 0, - BlockHeaderExtra::new([0u8; 4]), - ) - } -} - -#[allow(clippy::from_over_into)] -impl Into for DagBlockHeader { - fn into(self) -> RawDagBlockHeader { - RawDagBlockHeader { - parent_hash: self.parent_hash, - timestamp: self.timestamp, - author: self.author, - author_auth_key: self.author_auth_key, - accumulator_root: self.txn_accumulator_root, - parent_block_accumulator_root: self.block_accumulator_root, - state_root: self.state_root, - gas_used: self.gas_used, - difficulty: self.difficulty, - body_hash: self.body_hash, - chain_id: self.chain_id, - } - } -} - -#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] -pub struct RawDagBlockHeader { - /// Parent hash. - pub parent_hash: Vec, - /// Block timestamp. - pub timestamp: u64, - /// Block author. - pub author: AccountAddress, - /// Block author auth key. - /// this field is deprecated - pub author_auth_key: Option, - /// The transaction accumulator root hash after executing this block. - pub accumulator_root: HashValue, - /// The parent block accumulator root hash. - pub parent_block_accumulator_root: HashValue, - /// The last transaction state_root of this block after execute. - pub state_root: HashValue, - /// Gas used for contracts execution. 
- pub gas_used: u64, - /// Block difficulty - pub difficulty: U256, - /// hash for block body - pub body_hash: HashValue, - /// The chain id - pub chain_id: ChainId, -} - -#[derive(Default)] -pub struct DagBlockHeaderBuilder { - buffer: DagBlockHeader, -} - -impl DagBlockHeaderBuilder { - pub fn new() -> Self { - Self::default() - } - - pub fn random() -> Self { - Self { - buffer: DagBlockHeader::random(), - } - } - - fn new_with(buffer: DagBlockHeader) -> Self { - Self { buffer } - } - - pub fn with_parent_hash(mut self, parent_hash: Vec) -> Self { - self.buffer.parent_hash = parent_hash; - self - } - - pub fn with_timestamp(mut self, timestamp: u64) -> Self { - self.buffer.timestamp = timestamp; - self - } - - pub fn with_author(mut self, author: AccountAddress) -> Self { - self.buffer.author = author; - self - } - - pub fn with_author_auth_key(mut self, author_auth_key: Option) -> Self { - self.buffer.author_auth_key = author_auth_key; - self - } - - pub fn with_accumulator_root(mut self, accumulator_root: HashValue) -> Self { - self.buffer.txn_accumulator_root = accumulator_root; - self - } - - pub fn with_parent_block_accumulator_root( - mut self, - parent_block_accumulator_root: HashValue, - ) -> Self { - self.buffer.block_accumulator_root = parent_block_accumulator_root; - self - } - - pub fn with_state_root(mut self, state_root: HashValue) -> Self { - self.buffer.state_root = state_root; - self - } - - pub fn with_gas_used(mut self, gas_used: u64) -> Self { - self.buffer.gas_used = gas_used; - self - } - - pub fn with_difficulty(mut self, difficulty: U256) -> Self { - self.buffer.difficulty = difficulty; - self - } - - pub fn with_body_hash(mut self, body_hash: HashValue) -> Self { - self.buffer.body_hash = body_hash; - self - } - - pub fn with_chain_id(mut self, chain_id: ChainId) -> Self { - self.buffer.chain_id = chain_id; - self - } - - pub fn with_nonce(mut self, nonce: u32) -> Self { - self.buffer.nonce = nonce; - self - } - - pub fn with_extra(mut self, extra: BlockHeaderExtra) -> Self { - self.buffer.extra = extra; - self - } - - pub fn build(mut self) -> DagBlockHeader { - self.buffer.id = Some(self.buffer.crypto_hash()); - self.buffer - } -} - -#[derive( - Default, Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, -)] -pub struct BlockBody { - /// The transactions in this block. - pub transactions: Vec, - /// uncles block header - pub uncles: Option>, -} - -impl BlockBody { - pub fn new( - transactions: Vec, - uncles: Option>, - ) -> Self { - Self { - transactions, - uncles, - } - } - pub fn get_txn(&self, index: usize) -> Option<&SignedUserTransaction> { - self.transactions.get(index) - } - - /// Just for test - pub fn new_empty() -> BlockBody { - BlockBody { - transactions: Vec::new(), - uncles: None, - } - } - - pub fn hash(&self) -> HashValue { - self.crypto_hash() - } -} - -#[allow(clippy::from_over_into)] -impl Into for Vec { - fn into(self) -> BlockBody { - BlockBody { - transactions: self, - uncles: None, - } - } -} - -#[allow(clippy::from_over_into)] -impl Into> for BlockBody { - fn into(self) -> Vec { - self.transactions - } -} - -impl Sample for BlockBody { - fn sample() -> Self { - Self { - transactions: vec![], - uncles: None, - } - } -} - -/// A block, encoded as it is on the block chain. -#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] -pub struct Block { - /// The header of this block. - pub header: DagBlockHeader, - /// The body of this block. 
- pub body: BlockBody, -} - -impl Block { - pub fn new(header: DagBlockHeader, body: B) -> Self - where - B: Into, - { - Block { - header, - body: body.into(), - } - } - - pub fn id(&self) -> HashValue { - self.header.id() - } - pub fn header(&self) -> &DagBlockHeader { - &self.header - } - pub fn transactions(&self) -> &[SignedUserTransaction] { - self.body.transactions.as_slice() - } - - pub fn uncles(&self) -> Option<&[DagBlockHeader]> { - match &self.body.uncles { - Some(uncles) => Some(uncles.as_slice()), - None => None, - } - } - - pub fn uncle_ids(&self) -> Vec { - self.uncles() - .map(|uncles| uncles.iter().map(|header| header.id()).collect()) - .unwrap_or_default() - } - - pub fn into_inner(self) -> (DagBlockHeader, BlockBody) { - (self.header, self.body) - } - - pub fn genesis_block( - parent_hash: Vec, - timestamp: u64, - accumulator_root: HashValue, - state_root: HashValue, - difficulty: U256, - genesis_txn: SignedUserTransaction, - ) -> Self { - let chain_id = genesis_txn.chain_id(); - let block_body = BlockBody::new(vec![genesis_txn], None); - let header = DagBlockHeader::genesis_block_header( - parent_hash, - timestamp, - accumulator_root, - state_root, - difficulty, - block_body.hash(), - chain_id, - ); - Self { - header, - body: block_body, - } - } - - pub fn to_metadata(&self, parent_gas_used: u64) -> DagBlockMetadata { - DagBlockMetadata::new( - self.header.parent_hash(), - self.header.timestamp, - self.header.author, - self.header.author_auth_key, - self.header.chain_id, - parent_gas_used, - ) - } -} - -impl std::fmt::Display for Block { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "Block{{id:\"{}\", parent_id:\"{:?}\",", - self.id(), - self.header().parent_hash() - )?; - if let Some(uncles) = &self.body.uncles { - write!(f, "uncles:[")?; - for uncle in uncles { - write!(f, "\"{}\",", uncle.id())?; - } - write!(f, "],")?; - } - write!(f, "transactions:[")?; - for txn in &self.body.transactions { - write!(f, "\"{}\",", txn.id())?; - } - write!(f, "]}}") - } -} - -impl Sample for Block { - fn sample() -> Self { - Self { - header: DagBlockHeader::sample(), - body: BlockBody::sample(), - } - } -} - -/// `BlockInfo` is the object we store in the storage. It consists of the -/// block as well as the execution result of this block. -#[derive( - Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema, -)] -pub struct BlockInfo { - /// Block id - pub block_id: HashValue, - /// The total difficulty. - #[schemars(with = "String")] - pub total_difficulty: U256, - /// The transaction accumulator info - pub txn_accumulator_info: AccumulatorInfo, - /// The block accumulator info. 
- pub block_accumulator_info: AccumulatorInfo, -} - -impl BlockInfo { - pub fn new( - block_id: HashValue, - total_difficulty: U256, - txn_accumulator_info: AccumulatorInfo, - block_accumulator_info: AccumulatorInfo, - ) -> Self { - Self { - block_id, - total_difficulty, - txn_accumulator_info, - block_accumulator_info, - } - } - - pub fn id(&self) -> HashValue { - self.crypto_hash() - } - - pub fn get_total_difficulty(&self) -> U256 { - self.total_difficulty - } - - pub fn get_block_accumulator_info(&self) -> &AccumulatorInfo { - &self.block_accumulator_info - } - - pub fn get_txn_accumulator_info(&self) -> &AccumulatorInfo { - &self.txn_accumulator_info - } - - pub fn block_id(&self) -> &HashValue { - &self.block_id - } -} - -impl Sample for BlockInfo { - fn sample() -> Self { - Self { - block_id: DagBlockHeader::sample().id(), - total_difficulty: 0.into(), - txn_accumulator_info: AccumulatorInfo::sample(), - block_accumulator_info: AccumulatorInfo::sample(), - } - } -} - -#[derive(Clone, Debug)] -pub struct DagBlockTemplate { - /// Parent hash. - pub parent_hash: Vec, - /// Block timestamp. - pub timestamp: u64, - /// Block author. - pub author: AccountAddress, - /// The transaction accumulator root hash after executing this block. - pub txn_accumulator_root: HashValue, - /// The block accumulator root hash. - pub block_accumulator_root: HashValue, - /// The last transaction state_root of this block after execute. - pub state_root: HashValue, - /// Gas used for contracts execution. - pub gas_used: u64, - /// hash for block body - pub body_hash: HashValue, - /// body of the block - pub body: BlockBody, - /// The chain id - pub chain_id: ChainId, - /// Block difficulty - pub difficulty: U256, - /// Block consensus strategy - pub strategy: ConsensusStrategy, -} - -impl DagBlockTemplate { - pub fn new( - parent_block_accumulator_root: HashValue, - accumulator_root: HashValue, - state_root: HashValue, - gas_used: u64, - body: BlockBody, - chain_id: ChainId, - difficulty: U256, - strategy: ConsensusStrategy, - block_metadata: DagBlockMetadata, - ) -> Self { - let (parent_hash, timestamp, author, _author_auth_key, _, _) = block_metadata.into_inner(); - Self { - parent_hash, - block_accumulator_root: parent_block_accumulator_root, - timestamp, - author, - txn_accumulator_root: accumulator_root, - state_root, - gas_used, - body_hash: body.hash(), - body, - chain_id, - difficulty, - strategy, - } - } - - pub fn into_block(self, nonce: u32, extra: BlockHeaderExtra) -> Block { - let header = DagBlockHeader::new( - self.parent_hash, - self.timestamp, - self.author, - self.txn_accumulator_root, - self.block_accumulator_root, - self.state_root, - self.gas_used, - self.difficulty, - self.body_hash, - self.chain_id, - nonce, - extra, - ); - Block { - header, - body: self.body, - } - } - - pub fn as_raw_block_header(&self) -> RawDagBlockHeader { - RawDagBlockHeader { - parent_hash: self.parent_hash.clone(), - timestamp: self.timestamp, - author: self.author, - author_auth_key: None, - accumulator_root: self.txn_accumulator_root, - parent_block_accumulator_root: self.block_accumulator_root, - state_root: self.state_root, - gas_used: self.gas_used, - body_hash: self.body_hash, - difficulty: self.difficulty, - chain_id: self.chain_id, - } - } - - pub fn as_pow_header_blob(&self) -> Vec { - let mut blob = Vec::new(); - let raw_header = self.as_raw_block_header(); - let raw_header_hash = raw_header.crypto_hash(); - let mut dh = [0u8; 32]; - raw_header.difficulty.to_big_endian(&mut dh); - let extend_and_nonce 
= [0u8; 12]; - - blob.extend_from_slice(raw_header_hash.to_vec().as_slice()); - blob.extend_from_slice(&extend_and_nonce); - blob.extend_from_slice(&dh); - blob - } - - pub fn into_block_header(self, nonce: u32, extra: BlockHeaderExtra) -> DagBlockHeader { - DagBlockHeader::new( - self.parent_hash, - self.timestamp, - self.author, - self.txn_accumulator_root, - self.block_accumulator_root, - self.state_root, - self.gas_used, - self.difficulty, - self.body_hash, - self.chain_id, - nonce, - extra, - ) - } -} - -#[derive(Clone, Debug, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)] -pub struct ExecutedBlock { - pub block: Block, - pub block_info: BlockInfo, -} - -impl ExecutedBlock { - pub fn new(block: Block, block_info: BlockInfo) -> Self { - ExecutedBlock { block, block_info } - } - - pub fn total_difficulty(&self) -> U256 { - self.block_info.total_difficulty - } - - pub fn block(&self) -> &Block { - &self.block - } - - pub fn block_info(&self) -> &BlockInfo { - &self.block_info - } - - pub fn header(&self) -> &DagBlockHeader { - self.block.header() - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct BlockSummary { - pub block_header: DagBlockHeader, - pub uncles: Vec, -} - -impl BlockSummary { - pub fn uncles(&self) -> &[DagBlockHeader] { - &self.uncles - } - - pub fn header(&self) -> &DagBlockHeader { - &self.block_header - } -} - -impl From for BlockSummary { - fn from(block: Block) -> Self { - Self { - block_header: block.header, - uncles: block.body.uncles.unwrap_or_default(), - } - } -} - -#[allow(clippy::from_over_into)] -impl Into<(DagBlockHeader, Vec)> for BlockSummary { - fn into(self) -> (DagBlockHeader, Vec) { - (self.block_header, self.uncles) - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct UncleSummary { - /// total uncle - pub uncles: u64, - /// sum(number of the block which contain uncle block - uncle parent block number). 
- pub sum: u64, - pub avg: u64, - pub time_sum: u64, - pub time_avg: u64, -} - -impl UncleSummary { - pub fn new(uncles: u64, sum: u64, time_sum: u64) -> Self { - let (avg, time_avg) = ( - sum.checked_div(uncles).unwrap_or_default(), - time_sum.checked_div(uncles).unwrap_or_default(), - ); - Self { - uncles, - sum, - avg, - time_sum, - time_avg, - } - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct EpochUncleSummary { - /// epoch number - pub epoch: u64, - pub number_summary: UncleSummary, - pub epoch_summary: UncleSummary, -} - -impl EpochUncleSummary { - pub fn new(epoch: u64, number_summary: UncleSummary, epoch_summary: UncleSummary) -> Self { - Self { - epoch, - number_summary, - epoch_summary, - } - } -} diff --git a/types/src/lib.rs b/types/src/lib.rs index 9ff354a624..7e4e65b6d8 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -24,7 +24,6 @@ pub mod account_state; #[allow(clippy::too_many_arguments)] pub mod block; pub mod compact_block; -pub mod dag_block; pub mod block_metadata { pub use starcoin_vm_types::block_metadata::BlockMetadata; From ea020d70c0486c501d6ae41ccdfa402fb55f0232 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 31 Jul 2023 18:35:41 +0800 Subject: [PATCH 30/30] add header and block info ext --- sync/src/tasks/sync_dag_accumulator_task.rs | 10 ++-- sync/src/tasks/sync_find_ancestor_task.rs | 2 +- types/src/block.rs | 64 +++++++++++++++++++-- 3 files changed, 66 insertions(+), 10 deletions(-) diff --git a/sync/src/tasks/sync_dag_accumulator_task.rs b/sync/src/tasks/sync_dag_accumulator_task.rs index a304cf4819..e6ebbb2c2d 100644 --- a/sync/src/tasks/sync_dag_accumulator_task.rs +++ b/sync/src/tasks/sync_dag_accumulator_task.rs @@ -5,16 +5,15 @@ use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, Merkl use starcoin_crypto::HashValue; use starcoin_network_rpc_api::dag_protocol::{TargetDagAccumulatorLeafDetail, self}; use starcoin_storage::{ - flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage}, + flexi_dag::SyncFlexiDagSnapshotStorage, storage::CodecKVStore, }; +use starcoin_types::block::BlockInfoExt; use std::sync::Arc; use stream_task::{CollectorState, TaskResultCollector, TaskState}; use crate::verified_rpc_client::VerifiedRpcClient; -use super::sync_dag_protocol_trait::PeerSynDagAccumulator; - #[derive(Clone)] pub struct SyncDagAccumulatorTask { leaf_index: u64, @@ -130,13 +129,14 @@ impl TaskResultCollector for SyncDagAccumulatorC let num_leaves = accumulator_info.num_leaves; self.accumulator_snapshot.put( accumulator_leaf, - SyncFlexiDagSnapshot { + BlockInfoExt { child_hashes: item .relationship_pair .into_iter() .map(|pair| pair.child) .collect::>(), - accumulator_info, + dag_accumulator_info: accumulator_info, + block_info: None, }, )?; diff --git a/sync/src/tasks/sync_find_ancestor_task.rs b/sync/src/tasks/sync_find_ancestor_task.rs index 5206c2ef0c..4f47540f13 100644 --- a/sync/src/tasks/sync_find_ancestor_task.rs +++ b/sync/src/tasks/sync_find_ancestor_task.rs @@ -95,7 +95,7 @@ impl TaskResultCollector for AncestorCollector { })?; let accumulator_info = match self.accumulator_snapshot.get(accumulator_leaf)? 
{ - Some(snapshot) => snapshot.accumulator_info, + Some(snapshot) => snapshot.dag_accumulator_info, None => panic!("failed to get the snapshot, it is none."), }; diff --git a/types/src/block.rs b/types/src/block.rs index a72050786e..7ae775dc40 100644 --- a/types/src/block.rs +++ b/types/src/block.rs @@ -118,6 +118,65 @@ impl From for BlockIdAndNumber { /// block timestamp allowed future times pub const ALLOWED_FUTURE_BLOCKTIME: u64 = 30000; // 30 second; +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema)] +pub struct BlockHeaderExtForVerification { + block_header: BlockHeader, + + /// Dag block parents. + dag_parent_hashes: Vec, +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema)] +pub struct BlockHeaderExt { + #[serde(skip)] + dag_header_id: Option, + + #[serde(skip)] + block_header: Option, + + /// Dag block parents. + dag_parent_hashes: Vec, + + /// Dag block number. + dag_number: BlockNumber, +} + +impl BlockHeaderExt { + pub fn new(block_header: BlockHeader, dag_parent_hashes: Vec, dag_number: BlockNumber) -> Self { + Self { + dag_header_id: None, + block_header: Some(block_header), + dag_parent_hashes, + dag_number, + } + } + + pub fn dag_header_id(&self) -> Option { + self.dag_header_id + } + + pub fn block_header(&self) -> &Option { + &self.block_header + } + + pub fn dag_parent_hashes(&self) -> &Vec { + &self.dag_parent_hashes + } + + pub fn dag_number(&self) -> BlockNumber { + self.dag_number + } +} + +impl Into for BlockHeaderExt { + fn into(self) -> BlockHeaderExtForVerification { + BlockHeaderExtForVerification { + block_header: self.block_header.expect("block_header should not be None"), + dag_parent_hashes: self.dag_parent_hashes, + } + } +} + #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, CryptoHasher, CryptoHash, JsonSchema)] pub struct BlockHeader { #[serde(skip)] @@ -775,11 +834,8 @@ impl Sample for Block { Clone, Default, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema, )] pub struct BlockInfoExt { - /// Block ext id - pub block_ext_id: HashValue, - #[serde(skip)] - pub block_info: BlockInfo, + pub block_info: Option, /// The chid hashes. pub child_hashes: Vec, // child nodes, to get the relationship, use dag's relationship store
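
Editor's note, not part of the patch series: a minimal sketch of how the reworked snapshot type is expected to round-trip through the flexi-dag storage once these patches land. It assumes the BlockInfoExt field set left after patch 30 (block_info as Option<BlockInfo>, child_hashes, dag_accumulator_info) and the put_hashes / get_hashes_by_hash methods added in storage/src/flexi_dag/mod.rs; treat it as illustrative only.

// Illustrative sketch only (not from the patches): persist a dag accumulator
// snapshot keyed by its accumulator leaf hash, then read it back.
use anyhow::Result;
use starcoin_accumulator::accumulator_info::AccumulatorInfo;
use starcoin_crypto::HashValue;
use starcoin_storage::flexi_dag::SyncFlexiDagStorage;
use starcoin_types::block::BlockInfoExt;

fn save_and_load_snapshot(
    store: &SyncFlexiDagStorage,
    leaf: HashValue,
    child_hashes: Vec<HashValue>,
    dag_accumulator_info: AccumulatorInfo,
) -> Result<Option<BlockInfoExt>> {
    // block_info is #[serde(skip)] and filled in later by execution, so the
    // snapshot is first written with None, as the sync task in patch 30 does.
    store.put_hashes(
        leaf,
        BlockInfoExt {
            block_info: None,
            child_hashes,
            dag_accumulator_info,
        },
    )?;
    // Snapshots are looked up by the same accumulator leaf hash; None means
    // the leaf was never stored.
    store.get_hashes_by_hash(leaf)
}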