chore: apply rustfmt
Mododo committed May 9, 2024
1 parent 21f6e94 commit 37cfe08
Showing 25 changed files with 74 additions and 107 deletions.
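The repository's rustfmt.toml is not part of this commit, so the exact settings are not visible here. Judging from the changes below — nested use statements split to one module per line, import blocks regrouped into std / external / crate sections, block comments rewritten as line comments, and trailing struct-literal arguments left to overflow onto the call line — a nightly rustfmt configuration along these lines would produce similar output. The option values are an assumption, not taken from the repository:

# hypothetical rustfmt.toml sketch; nightly-only options
imports_granularity = "Module"     # use std::{a::X, b::Y}; -> one use per module
group_imports = "StdExternalCrate" # std first, then external crates, then crate/super/self
normalize_comments = true          # /* ... */ -> // ... where possible
wrap_comments = true               # rewrap comments that exceed the comment width
overflow_delimited_expr = true     # keep a trailing struct/array argument on the call line

Options like these require a nightly toolchain, so the formatting would typically have been applied with cargo +nightly fmt --all.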
8 changes: 3 additions & 5 deletions cli/src/node/mod.rs
@@ -439,14 +439,12 @@ impl Node {
         let state_storage = self.storage.shard_state_storage();

         for state in to_import {
-            let (handle, status) = handle_storage.create_or_load_handle(
-                state.block_id(),
-                BlockMetaData {
+            let (handle, status) =
+                handle_storage.create_or_load_handle(state.block_id(), BlockMetaData {
                     is_key_block: true,
                     gen_utime,
                     mc_ref_seqno: 0,
-                },
-            );
+                });

             let stored = state_storage
                 .store_state(&handle, &state)
11 changes: 4 additions & 7 deletions collator/src/mempool/mempool_adapter.rs
@@ -247,13 +247,10 @@ fn _stub_create_random_anchor_with_stub_externals(
         msg_cell_builder.store_u64(chain_time).unwrap();
         msg_cell_builder.store_u32(i as u32).unwrap();
         let msg_cell = msg_cell_builder.build().unwrap();
-        let msg = ExternalMessage::new(
-            msg_cell,
-            ExtInMsgInfo {
-                dst: IntAddr::Std(StdAddr::new(0, rand_addr)),
-                ..Default::default()
-            },
-        );
+        let msg = ExternalMessage::new(msg_cell, ExtInMsgInfo {
+            dst: IntAddr::Std(StdAddr::new(0, rand_addr)),
+            ..Default::default()
+        });
         externals.push(Arc::new(msg));
     }

37 changes: 14 additions & 23 deletions collator/src/mempool/mempool_adapter_stub.rs
@@ -1,22 +1,16 @@
-use std::{
-    collections::BTreeMap,
-    sync::{Arc, RwLock},
-};
+use std::collections::BTreeMap;
+use std::sync::{Arc, RwLock};

 use anyhow::{anyhow, Result};
 use async_trait::async_trait;
-
-use crate::mempool::{MempoolAdapter, MempoolEventListener};
-use everscale_types::{
-    cell::{CellBuilder, CellSliceRange, HashBytes},
-    models::{ExtInMsgInfo, IntAddr, MsgInfo, OwnedMessage, StdAddr},
-};
+use everscale_types::cell::{CellBuilder, CellSliceRange, HashBytes};
+use everscale_types::models::{ExtInMsgInfo, IntAddr, MsgInfo, OwnedMessage, StdAddr};
 use rand::Rng;
 use tycho_block_util::state::ShardStateStuff;

-use crate::tracing_targets;
-
 use super::types::{ExternalMessage, MempoolAnchor, MempoolAnchorId};
+use crate::mempool::{MempoolAdapter, MempoolEventListener};
+use crate::tracing_targets;

 #[cfg(test)]
 #[path = "tests/mempool_adapter_tests.rs"]
@@ -34,7 +28,7 @@ impl MempoolAdapterStubImpl {
     pub fn new(listener: Arc<dyn MempoolEventListener>) -> Self {
         tracing::info!(target: tracing_targets::MEMPOOL_ADAPTER, "Creating mempool adapter...");

-        //TODO: make real implementation, currently runs stub task
+        // TODO: make real implementation, currently runs stub task
         // that produces the repeating set of anchors
         let stub_anchors_cache = Arc::new(RwLock::new(BTreeMap::new()));

@@ -81,7 +75,7 @@ impl MempoolAdapterStubImpl {
 #[async_trait]
 impl MempoolAdapter for MempoolAdapterStubImpl {
     async fn enqueue_process_new_mc_block_state(&self, mc_state: ShardStateStuff) -> Result<()> {
-        //TODO: make real implementation, currently does nothing
+        // TODO: make real implementation, currently does nothing
         tracing::info!(
             target: tracing_targets::MEMPOOL_ADAPTER,
             "STUB: New masterchain state (block_id: {}) processing enqueued to mempool",
@@ -94,7 +88,7 @@ impl MempoolAdapter for MempoolAdapterStubImpl {
         &self,
         anchor_id: MempoolAnchorId,
     ) -> Result<Option<Arc<MempoolAnchor>>> {
-        //TODO: make real implementation, currently only return anchor from local cache
+        // TODO: make real implementation, currently only return anchor from local cache
         let res = {
             let anchors_cache_r = self
                 ._stub_anchors_cache
@@ -124,7 +118,7 @@ impl MempoolAdapter for MempoolAdapterStubImpl {
     }

     async fn get_next_anchor(&self, prev_anchor_id: MempoolAnchorId) -> Result<Arc<MempoolAnchor>> {
-        //TODO: make real implementation, currently only return anchor from local cache
+        // TODO: make real implementation, currently only return anchor from local cache

         let mut stub_first_attempt = true;
         let mut request_timer = std::time::Instant::now();
@@ -201,13 +195,10 @@ fn _stub_create_random_anchor_with_stub_externals(
         msg_cell_builder.store_u32(i as u32).unwrap();
         let msg_cell = msg_cell_builder.build().unwrap();
         let msg_cell_range = CellSliceRange::full(&*msg_cell);
-        let msg = ExternalMessage::new(
-            msg_cell,
-            ExtInMsgInfo {
-                dst: IntAddr::Std(StdAddr::new(0, rand_addr)),
-                ..Default::default()
-            },
-        );
+        let msg = ExternalMessage::new(msg_cell, ExtInMsgInfo {
+            dst: IntAddr::Std(StdAddr::new(0, rand_addr)),
+            ..Default::default()
+        });
         externals.push(Arc::new(msg));
     }

3 changes: 1 addition & 2 deletions consensus/examples/consensus_node.rs
@@ -14,7 +14,6 @@ use serde::{Deserialize, Serialize};
 use tokio::sync::mpsc;
 use tracing_subscriber::layer::SubscriberExt;
 use tracing_subscriber::{fmt, EnvFilter, Layer};
-
 use tycho_consensus::test_utils::drain_anchors;
 use tycho_consensus::Engine;
 use tycho_network::{DhtConfig, NetworkConfig, PeerId, PeerInfo};
@@ -53,7 +52,7 @@ impl Cli {
                     .with_ansi(false)
                     .compact()
                     .with_writer(non_blocking)
-                    .with_filter(EnvFilter::new("trace")), //todo: update with needed crates
+                    .with_filter(EnvFilter::new("trace")), // todo: update with needed crates
             );
             tracing::subscriber::set_global_default(collector)?;
         } else {
1 change: 0 additions & 1 deletion consensus/src/dag/anchor_stage.rs
@@ -1,7 +1,6 @@
 use std::sync::atomic::AtomicBool;

 use rand::{Rng, SeedableRng};
-
 use tycho_network::PeerId;

 use crate::intercom::PeerSchedule;
5 changes: 2 additions & 3 deletions consensus/src/dag/dag.rs
@@ -17,7 +17,6 @@ use crate::models::{Point, Round, Ugly, ValidPoint};
 #[derive(Clone)]
 pub struct Dag {
     // from the oldest to the current round; newer ones are in the future;
-    //
     rounds: Arc<Mutex<BTreeMap<Round, DagRound>>>,
 }

@@ -203,8 +202,8 @@ impl Dag {
     ///
     /// Note: at this point there is no way to check if passed point is really an anchor
     async fn gather_uncommitted(
-        anchor /* @ r+1 */: &Point,
-        anchor_round /* r+1 */: &DagRound,
+        anchor: &Point, // @ r+1
+        anchor_round: &DagRound, // r+1
     ) -> Vec<Arc<Point>> {
         assert_eq!(
             *anchor_round.round(),
3 changes: 1 addition & 2 deletions consensus/src/dag/dag_location.rs
@@ -5,7 +5,6 @@ use std::sync::{Arc, OnceLock};

 use everscale_crypto::ed25519::KeyPair;
 use futures_util::FutureExt;
-
 use tycho_util::futures::{JoinTask, Shared};

 use crate::models::{DagPoint, Digest, Round, Signature, UnixTime, ValidPoint};
@@ -22,7 +21,7 @@ pub struct DagLocation {
     // was proven by the next point of a node;
     // even if we marked this point as invalid, consensus may override our decision
     // and we will have to sync
-    /* vertex: Option<Digest>, */
+    // vertex: Option<Digest>,
     /// We can sign or reject just a single (e.g. first validated) point at the current location;
     /// other (equivocated) points may be received as includes, witnesses or a proven vertex;
     /// we have to include signed points @ r+0 & @ r-1 as dependencies in our point @ r+1.
1 change: 0 additions & 1 deletion consensus/src/dag/dag_round.rs
@@ -3,7 +3,6 @@ use std::sync::{Arc, Weak};
 use everscale_crypto::ed25519::KeyPair;
 use futures_util::future::BoxFuture;
 use futures_util::FutureExt;
-
 use tycho_network::PeerId;
 use tycho_util::FastDashMap;

1 change: 0 additions & 1 deletion consensus/src/dag/producer.rs
@@ -2,7 +2,6 @@ use std::collections::BTreeMap;
 use std::sync::Arc;

 use bytes::Bytes;
-
 use tycho_network::PeerId;

 use crate::dag::anchor_stage::AnchorStage;
48 changes: 27 additions & 21 deletions consensus/src/dag/verifier.rs
@@ -2,7 +2,6 @@ use std::sync::Arc;

 use futures_util::FutureExt;
 use tokio::task::JoinSet;
-
 use tycho_network::PeerId;

 use crate::dag::anchor_stage::AnchorStage;
@@ -11,19 +10,17 @@ use crate::engine::MempoolConfig;
 use crate::intercom::{Downloader, PeerSchedule};
 use crate::models::{DagPoint, Digest, Link, Location, NodeCount, Point, PointId, ValidPoint};

-/*
-Note on equivocation.
-Detected point equivocation does not invalidate the point, it just
-prevents us (as a fair actor) from returning our signature to the author.
-Such a point may be included in our next "includes" or "witnesses",
-but neither its inclusion nor omitting is required: as we don't
-return our signature, our dependencies cannot be validated against it.
-Equally, we immediately stop communicating with the equivocating node,
-without invalidating any of its points (no matter historical or future).
-We will not sign the proof for equivocated point
-as we've banned the author on network layer.
-Anyway, no more than one of equivocated points may become a vertex.
-*/
+// Note on equivocation.
+// Detected point equivocation does not invalidate the point, it just
+// prevents us (as a fair actor) from returning our signature to the author.
+// Such a point may be included in our next "includes" or "witnesses",
+// but neither its inclusion nor omitting is required: as we don't
+// return our signature, our dependencies cannot be validated against it.
+// Equally, we immediately stop communicating with the equivocating node,
+// without invalidating any of its points (no matter historical or future).
+// We will not sign the proof for equivocated point
+// as we've banned the author on network layer.
+// Anyway, no more than one of equivocated points may become a vertex.

 pub struct Verifier;

@@ -48,8 +45,8 @@ impl Verifier {

     /// must be called iff [Self::verify] succeeded
     pub async fn validate(
-        point /* @ r+0 */: Arc<Point>,
-        r_0 /* r+0 */: DagRound,
+        point: Arc<Point>, // @ r+0
+        r_0: DagRound, // r+0
         downloader: Downloader,
     ) -> DagPoint {
         // TODO upgrade Weak whenever used to let Dag Round drop if some future hangs up for long
@@ -75,7 +72,10 @@ impl Verifier {
         DagPoint::Trusted(ValidPoint::new(point.clone()))
     }

-    fn is_self_links_ok(point /* @ r+0 */: &Point, dag_round /* r+0 */: &DagRound) -> bool {
+    fn is_self_links_ok(
+        point: &Point, // @ r+0
+        dag_round: &DagRound, // r+0
+    ) -> bool {
         // existence of proofs in leader points is a part of point's well-form-ness check
         match &dag_round.anchor_stage() {
             // no one may link to self
@@ -181,8 +181,8 @@ impl Verifier {
     }

     fn gather_deps(
-        point /* @ r+0 */: &Point,
-        r_1 /* r-1 */: &DagRound,
+        point: &Point, // @ r+0
+        r_1: &DagRound, // r-1
         downloader: &Downloader,
         dependencies: &mut JoinSet<DagPoint>,
     ) {
@@ -310,7 +310,10 @@ impl Verifier {
     }

     /// blame author and every dependent point's author
-    fn is_list_of_signers_ok(point /* @ r+0 */: &Point, peer_schedule: &PeerSchedule) -> bool {
+    fn is_list_of_signers_ok(
+        point: &Point, // @ r+0
+        peer_schedule: &PeerSchedule,
+    ) -> bool {
         if point.body.location.round == MempoolConfig::GENESIS_ROUND {
             return true; // all maps are empty for a well-formed genesis
         }
@@ -363,7 +366,10 @@ impl Verifier {
     }

     /// blame author and every dependent point's author
-    fn is_proof_ok(point /* @ r+0 */: &Point, proven: &Point /* @ r-1 */) -> bool {
+    fn is_proof_ok(
+        point: &Point, // @ r+0
+        proven: &Point, // @ r-1
+    ) -> bool {
         if point.body.location.author != proven.body.location.author {
             panic!("Coding error: mismatched authors of proof and its vertex")
         }
1 change: 0 additions & 1 deletion consensus/src/engine/engine.rs
@@ -5,7 +5,6 @@ use itertools::Itertools;
 use tokio::sync::mpsc::UnboundedSender;
 use tokio::sync::{mpsc, oneshot, watch};
 use tokio::task::JoinSet;
-
 use tycho_network::{DhtClient, OverlayService, PeerId};

 use crate::dag::{Dag, DagRound, InclusionState, Producer};
4 changes: 1 addition & 3 deletions consensus/src/intercom/broadcast/broadcast_filter.rs
@@ -4,18 +4,16 @@ use std::sync::Arc;

 use tokio::sync::broadcast::error::RecvError;
 use tokio::sync::mpsc;
-
 use tycho_network::PeerId;
 use tycho_util::FastDashMap;

+use super::dto::ConsensusEvent;
 use crate::dag::Verifier;
 use crate::engine::MempoolConfig;
 use crate::intercom::dto::PeerState;
 use crate::intercom::PeerSchedule;
 use crate::models::{Digest, Location, NodeCount, Point, PointId, Round};

-use super::dto::ConsensusEvent;
-
 #[derive(Clone)]
 pub struct BroadcastFilter(Arc<BroadcastFilterInner>);

4 changes: 2 additions & 2 deletions consensus/src/intercom/broadcast/broadcaster.rs
@@ -4,9 +4,9 @@ use std::sync::Arc;
 use futures_util::future::BoxFuture;
 use futures_util::stream::FuturesUnordered;
 use futures_util::StreamExt;
-use tokio::sync::broadcast::{self, error::RecvError};
+use tokio::sync::broadcast::error::RecvError;
+use tokio::sync::broadcast::{self};
 use tokio::sync::mpsc;
-
 use tycho_network::PeerId;
 use tycho_util::{FastHashMap, FastHashSet};

5 changes: 2 additions & 3 deletions consensus/src/intercom/broadcast/collector.rs
@@ -6,7 +6,6 @@ use futures_util::future::BoxFuture;
 use futures_util::stream::FuturesUnordered;
 use futures_util::{FutureExt, StreamExt};
 use tokio::sync::{mpsc, oneshot};
-
 use tycho_network::PeerId;
 use tycho_util::FastHashSet;

@@ -126,7 +125,7 @@ struct CollectorTask {
     log_id: Arc<String>,
     downloader: Downloader,
     current_round: DagRound, // = r+0
-    next_dag_round: DagRound, // = r+1 is always in DAG; contains the keypair to produce point @ r+1
+    next_dag_round: DagRound, /* = r+1 is always in DAG; contains the keypair to produce point @ r+1 */

     // @ r+0, will become includes in point @ r+1
     // needed in order to not include same point twice - as an include and as a witness;
@@ -307,7 +306,7 @@ impl CollectorTask {
                     self.includes.push(task)
                 }
             }
-            _ => _ = self.current_round.add(&point, &self.downloader), // maybe other's dependency
+            _ => _ = self.current_round.add(&point, &self.downloader), /* maybe other's dependency */
         },
         ConsensusEvent::Invalid(dag_point) => {
             if &dag_point.location().round > self.next_dag_round.round() {
1 change: 0 additions & 1 deletion consensus/src/intercom/core/dispatcher.rs
@@ -1,7 +1,6 @@
 use anyhow::{anyhow, Result};
 use futures_util::future::BoxFuture;
 use futures_util::FutureExt;
-
 use tycho_network::{DhtClient, Network, OverlayId, OverlayService, PeerId, PrivateOverlay};

 use crate::intercom::core::dto::{MPQuery, MPResponse};
1 change: 0 additions & 1 deletion consensus/src/intercom/core/dto.rs
@@ -1,7 +1,6 @@
 use anyhow::anyhow;
 use bytes::Bytes;
 use serde::{Deserialize, Serialize};
-
 use tycho_network::{Response, ServiceRequest, Version};

 use crate::intercom::dto::{PointByIdResponse, SignatureResponse};
1 change: 0 additions & 1 deletion consensus/src/intercom/core/responder.rs
@@ -1,7 +1,6 @@
 use std::sync::Arc;

 use tokio::sync::{mpsc, oneshot};
-
 use tycho_network::{PeerId, Response, Service, ServiceRequest};
 use tycho_util::futures::BoxFutureOrNoop;

1 change: 0 additions & 1 deletion consensus/src/intercom/dependency/downloader.rs
@@ -9,7 +9,6 @@ use rand::SeedableRng;
 use tokio::sync::broadcast::error::RecvError;
 use tokio::sync::{broadcast, watch};
 use tokio::time::error::Elapsed;
-
 use tycho_network::PeerId;
 use tycho_util::{FastHashMap, FastHashSet};

3 changes: 2 additions & 1 deletion consensus/src/intercom/mod.rs
@@ -1,5 +1,6 @@
-pub use broadcast::*;
 pub use core::*;
+
+pub use broadcast::*;
 pub use dependency::*;
 pub use peer_schedule::*;
