From 37cfe0879118c928198e279e5597eb0d92fbeb95 Mon Sep 17 00:00:00 2001
From: Kirill Mikheev
Date: Thu, 9 May 2024 17:14:53 +0300
Subject: [PATCH] chore: apply rustfmt

---
 cli/src/node/mod.rs                           |  8 ++--
 collator/src/mempool/mempool_adapter.rs       | 11 ++---
 collator/src/mempool/mempool_adapter_stub.rs  | 37 ++++++--------
 consensus/examples/consensus_node.rs          |  3 +-
 consensus/src/dag/anchor_stage.rs             |  1 -
 consensus/src/dag/dag.rs                      |  5 +-
 consensus/src/dag/dag_location.rs             |  3 +-
 consensus/src/dag/dag_round.rs                |  1 -
 consensus/src/dag/producer.rs                 |  1 -
 consensus/src/dag/verifier.rs                 | 48 +++++++++++--------
 consensus/src/engine/engine.rs                |  1 -
 .../intercom/broadcast/broadcast_filter.rs    |  4 +-
 .../src/intercom/broadcast/broadcaster.rs     |  4 +-
 consensus/src/intercom/broadcast/collector.rs |  5 +-
 consensus/src/intercom/core/dispatcher.rs     |  1 -
 consensus/src/intercom/core/dto.rs            |  1 -
 consensus/src/intercom/core/responder.rs      |  1 -
 .../src/intercom/dependency/downloader.rs     |  1 -
 consensus/src/intercom/mod.rs                 |  3 +-
 .../intercom/peer_schedule/peer_schedule.rs   | 19 ++++----
 .../peer_schedule/peer_schedule_updater.rs    |  1 -
 consensus/src/models/node_count.rs            |  8 ++--
 consensus/src/models/point.rs                 |  1 -
 consensus/src/test_utils.rs                   |  4 +-
 .../src/store/shard_state/store_state_raw.rs  |  9 ++--
 25 files changed, 74 insertions(+), 107 deletions(-)

diff --git a/cli/src/node/mod.rs b/cli/src/node/mod.rs
index 9b5e55e08..f90b474af 100644
--- a/cli/src/node/mod.rs
+++ b/cli/src/node/mod.rs
@@ -439,14 +439,12 @@ impl Node {
         let state_storage = self.storage.shard_state_storage();
 
         for state in to_import {
-            let (handle, status) = handle_storage.create_or_load_handle(
-                state.block_id(),
-                BlockMetaData {
+            let (handle, status) =
+                handle_storage.create_or_load_handle(state.block_id(), BlockMetaData {
                     is_key_block: true,
                     gen_utime,
                     mc_ref_seqno: 0,
-                },
-            );
+                });
 
             let stored = state_storage
                 .store_state(&handle, &state)
diff --git a/collator/src/mempool/mempool_adapter.rs b/collator/src/mempool/mempool_adapter.rs
index 1feb65048..90742c341 100644
--- a/collator/src/mempool/mempool_adapter.rs
+++ b/collator/src/mempool/mempool_adapter.rs
@@ -247,13 +247,10 @@ fn _stub_create_random_anchor_with_stub_externals(
         msg_cell_builder.store_u64(chain_time).unwrap();
         msg_cell_builder.store_u32(i as u32).unwrap();
         let msg_cell = msg_cell_builder.build().unwrap();
-        let msg = ExternalMessage::new(
-            msg_cell,
-            ExtInMsgInfo {
-                dst: IntAddr::Std(StdAddr::new(0, rand_addr)),
-                ..Default::default()
-            },
-        );
+        let msg = ExternalMessage::new(msg_cell, ExtInMsgInfo {
+            dst: IntAddr::Std(StdAddr::new(0, rand_addr)),
+            ..Default::default()
+        });
 
         externals.push(Arc::new(msg));
     }
diff --git a/collator/src/mempool/mempool_adapter_stub.rs b/collator/src/mempool/mempool_adapter_stub.rs
index 1ed74d9b6..969930f84 100644
--- a/collator/src/mempool/mempool_adapter_stub.rs
+++ b/collator/src/mempool/mempool_adapter_stub.rs
@@ -1,22 +1,16 @@
-use std::{
-    collections::BTreeMap,
-    sync::{Arc, RwLock},
-};
+use std::collections::BTreeMap;
+use std::sync::{Arc, RwLock};
 
 use anyhow::{anyhow, Result};
 use async_trait::async_trait;
-
-use crate::mempool::{MempoolAdapter, MempoolEventListener};
-use everscale_types::{
-    cell::{CellBuilder, CellSliceRange, HashBytes},
-    models::{ExtInMsgInfo, IntAddr, MsgInfo, OwnedMessage, StdAddr},
-};
+use everscale_types::cell::{CellBuilder, CellSliceRange, HashBytes};
+use everscale_types::models::{ExtInMsgInfo, IntAddr, MsgInfo, OwnedMessage, StdAddr};
 use rand::Rng;
 use tycho_block_util::state::ShardStateStuff;
 
-use crate::tracing_targets;
-
 use super::types::{ExternalMessage, MempoolAnchor, MempoolAnchorId};
+use crate::mempool::{MempoolAdapter, MempoolEventListener};
+use crate::tracing_targets;
 
 #[cfg(test)]
 #[path = "tests/mempool_adapter_tests.rs"]
@@ -34,7 +28,7 @@ impl MempoolAdapterStubImpl {
     pub fn new(listener: Arc<dyn MempoolEventListener>) -> Self {
         tracing::info!(target: tracing_targets::MEMPOOL_ADAPTER, "Creating mempool adapter...");
 
-        //TODO: make real implementation, currently runs stub task
+        // TODO: make real implementation, currently runs stub task
         //      that produces the repeating set of anchors
         let stub_anchors_cache = Arc::new(RwLock::new(BTreeMap::new()));
@@ -81,7 +75,7 @@ impl MempoolAdapterStubImpl {
 #[async_trait]
 impl MempoolAdapter for MempoolAdapterStubImpl {
     async fn enqueue_process_new_mc_block_state(&self, mc_state: ShardStateStuff) -> Result<()> {
-        //TODO: make real implementation, currently does nothing
+        // TODO: make real implementation, currently does nothing
         tracing::info!(
             target: tracing_targets::MEMPOOL_ADAPTER,
             "STUB: New masterchain state (block_id: {}) processing enqueued to mempool",
@@ -94,7 +88,7 @@ impl MempoolAdapter for MempoolAdapterStubImpl {
         &self,
         anchor_id: MempoolAnchorId,
     ) -> Result<Option<Arc<MempoolAnchor>>> {
-        //TODO: make real implementation, currently only return anchor from local cache
+        // TODO: make real implementation, currently only return anchor from local cache
         let res = {
             let anchors_cache_r = self
                 ._stub_anchors_cache
@@ -124,7 +118,7 @@ impl MempoolAdapter for MempoolAdapterStubImpl {
     }
 
     async fn get_next_anchor(&self, prev_anchor_id: MempoolAnchorId) -> Result<Arc<MempoolAnchor>> {
-        //TODO: make real implementation, currently only return anchor from local cache
+        // TODO: make real implementation, currently only return anchor from local cache
         let mut stub_first_attempt = true;
         let mut request_timer = std::time::Instant::now();
@@ -201,13 +195,10 @@ fn _stub_create_random_anchor_with_stub_externals(
         msg_cell_builder.store_u32(i as u32).unwrap();
         let msg_cell = msg_cell_builder.build().unwrap();
         let msg_cell_range = CellSliceRange::full(&*msg_cell);
-        let msg = ExternalMessage::new(
-            msg_cell,
-            ExtInMsgInfo {
-                dst: IntAddr::Std(StdAddr::new(0, rand_addr)),
-                ..Default::default()
-            },
-        );
+        let msg = ExternalMessage::new(msg_cell, ExtInMsgInfo {
+            dst: IntAddr::Std(StdAddr::new(0, rand_addr)),
+            ..Default::default()
+        });
 
         externals.push(Arc::new(msg));
     }
diff --git a/consensus/examples/consensus_node.rs b/consensus/examples/consensus_node.rs
index 069ae7a69..c455ab190 100644
--- a/consensus/examples/consensus_node.rs
+++ b/consensus/examples/consensus_node.rs
@@ -14,7 +14,6 @@ use serde::{Deserialize, Serialize};
 use tokio::sync::mpsc;
 use tracing_subscriber::layer::SubscriberExt;
 use tracing_subscriber::{fmt, EnvFilter, Layer};
-
 use tycho_consensus::test_utils::drain_anchors;
 use tycho_consensus::Engine;
 use tycho_network::{DhtConfig, NetworkConfig, PeerId, PeerInfo};
@@ -53,7 +52,7 @@ impl Cli {
                     .with_ansi(false)
                     .compact()
                     .with_writer(non_blocking)
-                    .with_filter(EnvFilter::new("trace")), //todo: update with needed crates
+                    .with_filter(EnvFilter::new("trace")), // todo: update with needed crates
             );
             tracing::subscriber::set_global_default(collector)?;
         } else {
diff --git a/consensus/src/dag/anchor_stage.rs b/consensus/src/dag/anchor_stage.rs
index adbfdfd33..a3b7efcd3 100644
--- a/consensus/src/dag/anchor_stage.rs
+++ b/consensus/src/dag/anchor_stage.rs
@@ -1,7 +1,6 @@
 use std::sync::atomic::AtomicBool;
 
 use rand::{Rng, SeedableRng};
-
 use tycho_network::PeerId;
 
 use crate::intercom::PeerSchedule;
diff --git a/consensus/src/dag/dag.rs b/consensus/src/dag/dag.rs
index 9988fd9e6..60a94bcde 100644
--- a/consensus/src/dag/dag.rs
+++ b/consensus/src/dag/dag.rs
@@ -17,7 +17,6 @@ use crate::models::{Point, Round, Ugly, ValidPoint};
 
 #[derive(Clone)]
 pub struct Dag {
     // from the oldest to the current round; newer ones are in the future;
-
     rounds: Arc<Mutex<BTreeMap<Round, DagRound>>>,
 }
@@ -203,8 +202,8 @@ impl Dag {
     ///
     /// Note: at this point there is no way to check if passed point is really an anchor
     async fn gather_uncommitted(
-        anchor /* @ r+1 */: &Point,
-        anchor_round /* r+1 */: &DagRound,
+        anchor: &Point, // @ r+1
+        anchor_round: &DagRound, // r+1
     ) -> Vec<Arc<Point>> {
         assert_eq!(
             *anchor_round.round(),
diff --git a/consensus/src/dag/dag_location.rs b/consensus/src/dag/dag_location.rs
index c93df8101..5b50c802b 100644
--- a/consensus/src/dag/dag_location.rs
+++ b/consensus/src/dag/dag_location.rs
@@ -5,7 +5,6 @@ use std::sync::{Arc, OnceLock};
 
 use everscale_crypto::ed25519::KeyPair;
 use futures_util::FutureExt;
-
 use tycho_util::futures::{JoinTask, Shared};
 
 use crate::models::{DagPoint, Digest, Round, Signature, UnixTime, ValidPoint};
@@ -22,7 +21,7 @@ pub struct DagLocation {
     // was proven by the next point of a node;
     // even if we marked this point as invalid, consensus may override our decision
     // and we will have to sync
-    /* vertex: Option<DagPoint>, */
+    // vertex: Option<DagPoint>,
     /// We can sign or reject just a single (e.g. first validated) point at the current location;
     /// other (equivocated) points may be received as includes, witnesses or a proven vertex;
     /// we have to include signed points @ r+0 & @ r-1 as dependencies in our point @ r+1.
diff --git a/consensus/src/dag/dag_round.rs b/consensus/src/dag/dag_round.rs
index 084aca0c8..7da9b4575 100644
--- a/consensus/src/dag/dag_round.rs
+++ b/consensus/src/dag/dag_round.rs
@@ -3,7 +3,6 @@ use std::sync::{Arc, Weak};
 
 use everscale_crypto::ed25519::KeyPair;
 use futures_util::future::BoxFuture;
 use futures_util::FutureExt;
-
 use tycho_network::PeerId;
 use tycho_util::FastDashMap;
diff --git a/consensus/src/dag/producer.rs b/consensus/src/dag/producer.rs
index e0acaed30..3c2f702f4 100644
--- a/consensus/src/dag/producer.rs
+++ b/consensus/src/dag/producer.rs
@@ -2,7 +2,6 @@ use std::collections::BTreeMap;
 use std::sync::Arc;
 
 use bytes::Bytes;
-
 use tycho_network::PeerId;
 
 use crate::dag::anchor_stage::AnchorStage;
diff --git a/consensus/src/dag/verifier.rs b/consensus/src/dag/verifier.rs
index 38df53792..8a18d1347 100644
--- a/consensus/src/dag/verifier.rs
+++ b/consensus/src/dag/verifier.rs
@@ -2,7 +2,6 @@ use std::sync::Arc;
 
 use futures_util::FutureExt;
 use tokio::task::JoinSet;
-
 use tycho_network::PeerId;
 
 use crate::dag::anchor_stage::AnchorStage;
@@ -11,19 +10,17 @@ use crate::engine::MempoolConfig;
 use crate::intercom::{Downloader, PeerSchedule};
 use crate::models::{DagPoint, Digest, Link, Location, NodeCount, Point, PointId, ValidPoint};
 
-/*
-Note on equivocation.
-Detected point equivocation does not invalidate the point, it just
-    prevents us (as a fair actor) from returning our signature to the author.
-Such a point may be included in our next "includes" or "witnesses",
-    but neither its inclusion nor omitting is required: as we don't
-    return our signature, our dependencies cannot be validated against it.
-Equally, we immediately stop communicating with the equivocating node,
-    without invalidating any of its points (no matter historical or future).
-We will not sign the proof for equivocated point
-    as we've banned the author on network layer.
-Anyway, no more than one of equivocated points may become a vertex.
-*/
+// Note on equivocation.
+// Detected point equivocation does not invalidate the point, it just
+//     prevents us (as a fair actor) from returning our signature to the author.
+// Such a point may be included in our next "includes" or "witnesses",
+//     but neither its inclusion nor omitting is required: as we don't
+//     return our signature, our dependencies cannot be validated against it.
+// Equally, we immediately stop communicating with the equivocating node,
+//     without invalidating any of its points (no matter historical or future).
+// We will not sign the proof for equivocated point
+//     as we've banned the author on network layer.
+// Anyway, no more than one of equivocated points may become a vertex.
 
 pub struct Verifier;
 
@@ -48,8 +45,8 @@ impl Verifier {
 
     /// must be called iff [Self::verify] succeeded
     pub async fn validate(
-        point /* @ r+0 */: Arc<Point>,
-        r_0 /* r+0 */: DagRound,
+        point: Arc<Point>, // @ r+0
+        r_0: DagRound, // r+0
         downloader: Downloader,
     ) -> DagPoint {
         // TODO upgrade Weak whenever used to let Dag Round drop if some future hangs up for long
@@ -75,7 +72,10 @@ impl Verifier {
         DagPoint::Trusted(ValidPoint::new(point.clone()))
     }
 
-    fn is_self_links_ok(point /* @ r+0 */: &Point, dag_round /* r+0 */: &DagRound) -> bool {
+    fn is_self_links_ok(
+        point: &Point, // @ r+0
+        dag_round: &DagRound, // r+0
+    ) -> bool {
         // existence of proofs in leader points is a part of point's well-form-ness check
         match &dag_round.anchor_stage() {
             // no one may link to self
@@ -181,8 +181,8 @@
     }
 
     fn gather_deps(
-        point /* @ r+0 */: &Point,
-        r_1 /* r-1 */: &DagRound,
+        point: &Point, // @ r+0
+        r_1: &DagRound, // r-1
         downloader: &Downloader,
         dependencies: &mut JoinSet<DagPoint>,
     ) {
@@ -310,7 +310,10 @@ impl Verifier {
     }
 
     /// blame author and every dependent point's author
-    fn is_list_of_signers_ok(point /* @ r+0 */: &Point, peer_schedule: &PeerSchedule) -> bool {
+    fn is_list_of_signers_ok(
+        point: &Point, // @ r+0
+        peer_schedule: &PeerSchedule,
+    ) -> bool {
         if point.body.location.round == MempoolConfig::GENESIS_ROUND {
             return true; // all maps are empty for a well-formed genesis
         }
@@ -363,7 +366,10 @@ impl Verifier {
     }
 
     /// blame author and every dependent point's author
-    fn is_proof_ok(point /* @ r+0 */: &Point, proven: &Point /* @ r-1 */) -> bool {
+    fn is_proof_ok(
+        point: &Point, // @ r+0
+        proven: &Point, // @ r-1
+    ) -> bool {
         if point.body.location.author != proven.body.location.author {
             panic!("Coding error: mismatched authors of proof and its vertex")
         }
diff --git a/consensus/src/engine/engine.rs b/consensus/src/engine/engine.rs
index e7a963308..e959b744c 100644
--- a/consensus/src/engine/engine.rs
+++ b/consensus/src/engine/engine.rs
@@ -5,7 +5,6 @@ use itertools::Itertools;
 use tokio::sync::mpsc::UnboundedSender;
 use tokio::sync::{mpsc, oneshot, watch};
 use tokio::task::JoinSet;
-
 use tycho_network::{DhtClient, OverlayService, PeerId};
 
 use crate::dag::{Dag, DagRound, InclusionState, Producer};
diff --git a/consensus/src/intercom/broadcast/broadcast_filter.rs b/consensus/src/intercom/broadcast/broadcast_filter.rs
index 27d2b7fab..e958e189a 100644
--- a/consensus/src/intercom/broadcast/broadcast_filter.rs
+++ b/consensus/src/intercom/broadcast/broadcast_filter.rs
@@ -4,18 +4,16 @@
 use std::sync::Arc;
 
 use tokio::sync::broadcast::error::RecvError;
 use tokio::sync::mpsc;
-
 use tycho_network::PeerId;
 use tycho_util::FastDashMap;
 
+use super::dto::ConsensusEvent;
 use crate::dag::Verifier;
 use crate::engine::MempoolConfig;
 use crate::intercom::dto::PeerState;
 use crate::intercom::PeerSchedule;
 use crate::models::{Digest, Location, NodeCount, Point, PointId, Round};
 
-use super::dto::ConsensusEvent;
-
 #[derive(Clone)]
 pub struct BroadcastFilter(Arc<BroadcastFilterInner>);
diff --git a/consensus/src/intercom/broadcast/broadcaster.rs b/consensus/src/intercom/broadcast/broadcaster.rs
index b4a45cbd3..39e94be61 100644
--- a/consensus/src/intercom/broadcast/broadcaster.rs
+++ b/consensus/src/intercom/broadcast/broadcaster.rs
@@ -4,9 +4,9 @@ use std::sync::Arc;
 use futures_util::future::BoxFuture;
 use futures_util::stream::FuturesUnordered;
 use futures_util::StreamExt;
-use tokio::sync::broadcast::{self, error::RecvError};
+use tokio::sync::broadcast::error::RecvError;
+use tokio::sync::broadcast::{self};
 use tokio::sync::mpsc;
-
 use tycho_network::PeerId;
 use tycho_util::{FastHashMap, FastHashSet};
 
diff --git a/consensus/src/intercom/broadcast/collector.rs b/consensus/src/intercom/broadcast/collector.rs
index 50a916988..df29f4b7e 100644
--- a/consensus/src/intercom/broadcast/collector.rs
+++ b/consensus/src/intercom/broadcast/collector.rs
@@ -6,7 +6,6 @@ use futures_util::future::BoxFuture;
 use futures_util::stream::FuturesUnordered;
 use futures_util::{FutureExt, StreamExt};
 use tokio::sync::{mpsc, oneshot};
-
 use tycho_network::PeerId;
 use tycho_util::FastHashSet;
 
@@ -126,7 +125,7 @@ struct CollectorTask {
     log_id: Arc<String>,
     downloader: Downloader,
     current_round: DagRound, // = r+0
-    next_dag_round: DagRound, // = r+1 is always in DAG; contains the keypair to produce point @ r+1
+    next_dag_round: DagRound, /* = r+1 is always in DAG; contains the keypair to produce point @ r+1 */
 
     // @ r+0, will become includes in point @ r+1
     // needed in order to not include same point twice - as an include and as a witness;
@@ -307,7 +306,7 @@ impl CollectorTask {
                     self.includes.push(task)
                 }
             }
-            _ => _ = self.current_round.add(&point, &self.downloader), // maybe other's dependency
+            _ => _ = self.current_round.add(&point, &self.downloader), /* maybe other's dependency */
         },
         ConsensusEvent::Invalid(dag_point) => {
             if &dag_point.location().round > self.next_dag_round.round() {
diff --git a/consensus/src/intercom/core/dispatcher.rs b/consensus/src/intercom/core/dispatcher.rs
index 835faa9f1..5c6901ae2 100644
--- a/consensus/src/intercom/core/dispatcher.rs
+++ b/consensus/src/intercom/core/dispatcher.rs
@@ -1,7 +1,6 @@
 use anyhow::{anyhow, Result};
 use futures_util::future::BoxFuture;
 use futures_util::FutureExt;
-
 use tycho_network::{DhtClient, Network, OverlayId, OverlayService, PeerId, PrivateOverlay};
 
 use crate::intercom::core::dto::{MPQuery, MPResponse};
diff --git a/consensus/src/intercom/core/dto.rs b/consensus/src/intercom/core/dto.rs
index 3926d4961..17ede7dc6 100644
--- a/consensus/src/intercom/core/dto.rs
+++ b/consensus/src/intercom/core/dto.rs
@@ -1,7 +1,6 @@
 use anyhow::anyhow;
 use bytes::Bytes;
 use serde::{Deserialize, Serialize};
-
 use tycho_network::{Response, ServiceRequest, Version};
 
 use crate::intercom::dto::{PointByIdResponse, SignatureResponse};
diff --git a/consensus/src/intercom/core/responder.rs b/consensus/src/intercom/core/responder.rs
index ba9d65851..072af9ecc 100644
--- a/consensus/src/intercom/core/responder.rs
+++ b/consensus/src/intercom/core/responder.rs
@@ -1,7 +1,6 @@
 use std::sync::Arc;
 
 use tokio::sync::{mpsc, oneshot};
-
 use tycho_network::{PeerId, Response, Service, ServiceRequest};
 use tycho_util::futures::BoxFutureOrNoop;
 
diff --git a/consensus/src/intercom/dependency/downloader.rs b/consensus/src/intercom/dependency/downloader.rs
index 0d656eeff..d50ba662b 100644
--- a/consensus/src/intercom/dependency/downloader.rs
+++ b/consensus/src/intercom/dependency/downloader.rs
@@ -9,7 +9,6 @@ use rand::SeedableRng;
 use tokio::sync::broadcast::error::RecvError;
 use tokio::sync::{broadcast, watch};
 use tokio::time::error::Elapsed;
-
 use tycho_network::PeerId;
 use tycho_util::{FastHashMap, FastHashSet};
 
diff --git a/consensus/src/intercom/mod.rs b/consensus/src/intercom/mod.rs
index 60d1b6c11..a8f5064b5 100644
--- a/consensus/src/intercom/mod.rs
+++ b/consensus/src/intercom/mod.rs
@@ -1,5 +1,6 @@
-pub use broadcast::*;
 pub use core::*;
+
+pub use broadcast::*;
 pub use dependency::*;
 pub use peer_schedule::*;
 
diff --git a/consensus/src/intercom/peer_schedule/peer_schedule.rs b/consensus/src/intercom/peer_schedule/peer_schedule.rs
index 630011e77..2c51d0005 100644
--- a/consensus/src/intercom/peer_schedule/peer_schedule.rs
+++ b/consensus/src/intercom/peer_schedule/peer_schedule.rs
@@ -6,23 +6,20 @@ use std::sync::Arc;
 
 use everscale_crypto::ed25519::KeyPair;
 use parking_lot::Mutex;
 use tokio::sync::broadcast;
-
 use tycho_network::{PeerId, PrivateOverlay};
 use tycho_util::FastHashSet;
 
 use crate::intercom::dto::PeerState;
 use crate::models::{NodeCount, Round};
 
-/*
-    As validators are elected for wall-clock time range,
-    the round of validator set switch is not known beforehand
-    and will be determined by the time in anchor vertices:
-    it must reach some predefined time range,
-    when the new set is supposed to be online and start to request points,
-    and a (relatively high) predefined number of support rounds must follow
-    for the anchor chain to be committed by majority and for the new nodes to gather data.
-    The switch will occur for validator sets as a whole.
-*/
+// As validators are elected for wall-clock time range,
+// the round of validator set switch is not known beforehand
+// and will be determined by the time in anchor vertices:
+// it must reach some predefined time range,
+// when the new set is supposed to be online and start to request points,
+// and a (relatively high) predefined number of support rounds must follow
+// for the anchor chain to be committed by majority and for the new nodes to gather data.
+// The switch will occur for validator sets as a whole.
 
 #[derive(Clone)]
 pub struct PeerSchedule {
diff --git a/consensus/src/intercom/peer_schedule/peer_schedule_updater.rs b/consensus/src/intercom/peer_schedule/peer_schedule_updater.rs
index df039621b..fd2c3d5af 100644
--- a/consensus/src/intercom/peer_schedule/peer_schedule_updater.rs
+++ b/consensus/src/intercom/peer_schedule/peer_schedule_updater.rs
@@ -5,7 +5,6 @@ use parking_lot::Mutex;
 use rand::prelude::IteratorRandom;
 use tokio::sync::broadcast::error::RecvError;
 use tokio::task::AbortHandle;
-
 use tycho_network::{PeerId, PrivateOverlay, PrivateOverlayEntriesEvent};
 
 use crate::intercom::PeerSchedule;
diff --git a/consensus/src/models/node_count.rs b/consensus/src/models/node_count.rs
index d2b91a13a..7bedca054 100644
--- a/consensus/src/models/node_count.rs
+++ b/consensus/src/models/node_count.rs
@@ -57,9 +57,7 @@ impl NodeCount {
     pub fn reliable_minority(&self) -> usize {
         self.0 as usize + 1
     }
-    /*
-    pub fn unreliable(&self) -> usize {
-        self.0
-    }
-    */
+    // pub fn unreliable(&self) -> usize {
+    //     self.0
+    // }
 }
diff --git a/consensus/src/models/point.rs b/consensus/src/models/point.rs
index 37df9258c..4a74285da 100644
--- a/consensus/src/models/point.rs
+++ b/consensus/src/models/point.rs
@@ -6,7 +6,6 @@ use bytes::Bytes;
 use everscale_crypto::ed25519::KeyPair;
 use serde::{Deserialize, Serialize};
 use sha2::{Digest as Sha2Digest, Sha256};
-
 use tycho_network::PeerId;
 
 use crate::engine::MempoolConfig;
diff --git a/consensus/src/test_utils.rs b/consensus/src/test_utils.rs
index 22084634a..35a29ef50 100644
--- a/consensus/src/test_utils.rs
+++ b/consensus/src/test_utils.rs
@@ -4,7 +4,6 @@ use std::sync::Arc;
 use everscale_crypto::ed25519::{KeyPair, PublicKey, SecretKey};
 use tokio::sync::mpsc::UnboundedReceiver;
 use tokio::task::JoinHandle;
-
 use tycho_network::{
     Address, DhtClient, DhtConfig, DhtService, Network, NetworkConfig, OverlayService, PeerId,
     PeerInfo, Router, ToSocket,
 };
@@ -113,9 +112,8 @@ mod tests {
     use tokio::sync::mpsc;
     use tokio::task::JoinSet;
 
-    use crate::engine::Engine;
-
     use super::*;
+    use crate::engine::Engine;
 
     async fn make_network(node_count: usize) -> Vec {
         let keys = (0..node_count)
diff --git a/storage/src/store/shard_state/store_state_raw.rs b/storage/src/store/shard_state/store_state_raw.rs
index 78cedb954..ebcbed638 100644
--- a/storage/src/store/shard_state/store_state_raw.rs
+++ b/storage/src/store/shard_state/store_state_raw.rs
@@ -592,12 +592,9 @@ mod test {
         }
         tracing::info!("Decompressed the archive");
 
-        let db = Db::open(
-            current_test_path.join("rocksdb"),
-            DbConfig {
-                rocksdb_lru_capacity: ByteSize::mb(256),
-            },
-        )?;
+        let db = Db::open(current_test_path.join("rocksdb"), DbConfig {
+            rocksdb_lru_capacity: ByteSize::mb(256),
+        })?;
         let file_db = FileDb::new(current_test_path.join("file_db"))?;
 
         let cells_storage = CellStorage::new(db.clone(), 100_000_000);