From ff7925917c8412f1796f65590859349dd02a21de Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Tue, 28 Nov 2023 20:34:01 +0100 Subject: [PATCH 01/35] network: Describe DHT proto --- Cargo.lock | 124 ++++++++++++++++++ network/Cargo.toml | 2 + network/src/connection.rs | 27 ++-- network/src/network/connection_manager.rs | 21 ++- network/src/network/mod.rs | 74 ++++++++--- network/src/network/request_handler.rs | 150 ++++++++++++++++++++++ network/src/proto.tl | 131 +++++++++++++++++-- network/src/types/mod.rs | 20 +++ network/src/types/peer_id.rs | 4 + 9 files changed, 506 insertions(+), 47 deletions(-) create mode 100644 network/src/network/request_handler.rs diff --git a/Cargo.lock b/Cargo.lock index b38c716a8..a6b760118 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -215,6 +215,34 @@ dependencies = [ "typenum", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "platforms", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "dashmap" version = "5.5.3" @@ -298,6 +326,26 @@ dependencies = [ "signature", ] +[[package]] +name = "ed25519-dalek" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" +dependencies = [ + "curve25519-dalek", + "ed25519", + "serde", + "sha2", + "subtle", + "zeroize", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" + [[package]] name = "futures-core" version = "0.3.29" @@ -635,6 +683,26 @@ dependencies = [ "sha2", ] +[[package]] +name = "pin-project" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "pin-project-lite" version = "0.2.13" @@ -657,6 +725,12 @@ dependencies = [ "spki", ] +[[package]] +name = "platforms" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0" + [[package]] name = "powerfmt" version = "0.2.0" @@ -871,6 +945,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rusticata-macros" version = "4.1.0" @@ -971,6 +1054,12 @@ 
dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" + [[package]] name = "serde" version = "1.0.193" @@ -1067,6 +1156,12 @@ dependencies = [ "der", ] +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + [[package]] name = "syn" version = "1.0.109" @@ -1257,6 +1352,33 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + [[package]] name = "tracing" version = "0.1.40" @@ -1369,6 +1491,7 @@ dependencies = [ "bytes", "dashmap", "ed25519", + "ed25519-dalek", "futures-util", "hex", "pin-project-lite", @@ -1385,6 +1508,7 @@ dependencies = [ "tl-proto", "tokio", "tokio-util", + "tower", "tracing", "tracing-test", "tycho-util", diff --git a/network/Cargo.toml b/network/Cargo.toml index 112d558b2..4b8b01c2b 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -12,6 +12,7 @@ anyhow = "1.0" bytes = "1.0" dashmap = "5.4" ed25519 = { version = "2.0", features = ["alloc", "pkcs8"] } +ed25519-dalek = { version = "2.0" } futures-util = { version = "0.3", features = ["sink"] } hex = "0.4" pin-project-lite = "0.2" @@ -28,6 +29,7 @@ thiserror = "1" tl-proto = "0.4" tokio = { version = "1", features = ["rt", "sync", "io-util", "macros"] } tokio-util = { version = "0.7", features = ["codec"] } +tower = { version = "0.4", features = ["util"] } tracing = "0.1" x509-parser = "0.15" diff --git a/network/src/connection.rs b/network/src/connection.rs index 91912dcf0..c6fd9b851 100644 --- a/network/src/connection.rs +++ b/network/src/connection.rs @@ -1,32 +1,39 @@ use std::net::SocketAddr; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use anyhow::{Context as _, Result}; use bytes::Bytes; use quinn::{ConnectionError, RecvStream}; -use crate::types::{Direction, PeerId}; +use crate::types::{Direction, InboundRequestMeta, PeerId}; #[derive(Clone)] pub struct Connection { inner: quinn::Connection, - peer_id: PeerId, - origin: Direction, + request_meta: Arc, } impl Connection { pub fn new(inner: quinn::Connection, origin: Direction) -> Result { let peer_id = extract_peer_id(&inner)?; Ok(Self { + request_meta: Arc::new(InboundRequestMeta { + peer_id, + origin, + remote_address: inner.remote_address(), + }), inner, - peer_id, - origin, }) } + pub fn request_meta(&self) -> &Arc { + &self.request_meta + } + pub fn peer_id(&self) -> &PeerId { - &self.peer_id + &self.request_meta.peer_id } pub fn stable_id(&self) -> usize { @@ -34,11 +41,11 @@ impl Connection { } pub fn origin(&self) -> Direction { - self.origin + self.request_meta.origin } pub fn remote_address(&self) -> 
SocketAddr { - self.inner.remote_address() + self.request_meta.remote_address } pub fn close(&self) { @@ -75,10 +82,10 @@ impl Connection { impl std::fmt::Debug for Connection { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Connection") - .field("origin", &self.origin) + .field("origin", &self.request_meta.origin) .field("id", &self.stable_id()) .field("remote_address", &self.remote_address()) - .field("peer_id", &self.peer_id) + .field("peer_id", &self.request_meta.peer_id) .finish_non_exhaustive() } } diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index b7f9845a1..8d25eb24c 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -1,3 +1,4 @@ +use std::convert::Infallible; use std::net::SocketAddr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Weak}; @@ -5,17 +6,21 @@ use std::time::{Duration, Instant}; use ahash::HashMap; use anyhow::Result; +use bytes::Bytes; use tokio::sync::{broadcast, mpsc, oneshot}; use tokio::task::JoinSet; +use tower::util::BoxCloneService; use crate::config::Config; use crate::connection::Connection; use crate::endpoint::{Connecting, Endpoint}; use crate::types::{ - Direction, DisconnectReason, FastDashMap, FastHashMap, PeerAffinity, PeerEvent, PeerId, - PeerInfo, + Direction, DisconnectReason, FastDashMap, FastHashMap, InboundServiceRequest, PeerAffinity, + PeerEvent, PeerId, PeerInfo, Response, }; +use super::request_handler::InboundRequestHandler; + #[derive(Debug)] pub enum ConnectionManagerRequest { Connect(SocketAddr, Option, oneshot::Sender>), @@ -36,6 +41,8 @@ pub struct ConnectionManager { active_peers: ActivePeers, known_peers: KnownPeers, + + service: BoxCloneService, Response, Infallible>, } impl Drop for ConnectionManager { @@ -50,6 +57,7 @@ impl ConnectionManager { endpoint: Arc, active_peers: ActivePeers, known_peers: KnownPeers, + service: BoxCloneService, Response, Infallible>, ) -> (Self, mpsc::Sender) { let (mailbox_tx, mailbox) = mpsc::channel(config.connection_manager_channel_capacity); let connection_manager = Self { @@ -62,6 +70,7 @@ impl ConnectionManager { dial_backoff_states: Default::default(), active_peers, known_peers, + service, }; (connection_manager, mailbox_tx) } @@ -292,7 +301,13 @@ impl ConnectionManager { fn add_peer(&mut self, connection: Connection) { if let Some(connection) = self.active_peers.add(self.endpoint.peer_id(), connection) { - // TODO: spawn request handler + let handler = InboundRequestHandler::new( + self.config.clone(), + connection, + self.service.clone(), + self.active_peers.clone(), + ); + self.connection_handlers.spawn(handler.start()); } } diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index d2c2f87c1..2a201f074 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -1,13 +1,16 @@ +use std::convert::Infallible; use std::net::{SocketAddr, ToSocketAddrs}; use std::sync::Arc; use anyhow::Result; +use bytes::Bytes; use rand::Rng; use tokio::sync::{mpsc, oneshot}; +use tower::ServiceExt; use crate::config::{Config, EndpointConfig}; use crate::endpoint::Endpoint; -use crate::types::{DisconnectReason, PeerId}; +use crate::types::{DisconnectReason, InboundServiceRequest, PeerId, Response}; use self::connection_manager::{ ActivePeers, ConnectionManager, ConnectionManagerRequest, KnownPeers, WeakActivePeers, @@ -16,6 +19,7 @@ use self::peer::Peer; pub mod connection_manager; pub mod peer; +pub mod 
request_handler; pub struct Builder { mandatory_fields: MandatoryFields, @@ -59,7 +63,16 @@ impl Builder<(T1, ())> { } impl Builder { - pub fn build(self, bind_address: T) -> Result { + pub fn build(self, bind_address: T, service: S) -> Result + where + S: Clone + Send + 'static, + S: tower::Service< + InboundServiceRequest, + Response = Response, + Error = Infallible, + >, + >>::Future: Send + 'static, + { use socket2::{Domain, Protocol, Socket, Type}; let config = self.optional_fields.config.unwrap_or_default(); @@ -109,22 +122,29 @@ impl Builder { let weak_active_peers = ActivePeers::downgrade(&active_peers); let known_peers = KnownPeers::new(); - let (connection_manager, connection_manager_handle) = ConnectionManager::new( - config.clone(), - endpoint.clone(), - active_peers, - known_peers.clone(), - ); - - tokio::spawn(connection_manager.start()); + let inner = Arc::new_cyclic(move |_weak| { + let service = service.boxed_clone(); + + let (connection_manager, connection_manager_handle) = ConnectionManager::new( + config.clone(), + endpoint.clone(), + active_peers, + known_peers.clone(), + service, + ); + + tokio::spawn(connection_manager.start()); + + NetworkInner { + config, + endpoint, + active_peers: weak_active_peers, + known_peers, + connection_manager_handle, + } + }); - Ok(Network(Arc::new(NetworkInner { - config, - endpoint, - active_peers: weak_active_peers, - known_peers, - connection_manager_handle, - }))) + Ok(Network(inner)) } } @@ -218,27 +238,41 @@ impl NetworkInner { #[cfg(test)] mod tests { + use tower::util::BoxCloneService; use tracing_test::traced_test; use super::*; + fn echo_service() -> BoxCloneService, Response, Infallible> + { + let handle = |request: InboundServiceRequest| async move { + tracing::trace!("received: {}", request.body.escape_ascii()); + let response = Response { + version: Default::default(), + body: request.body, + }; + Ok::<_, Infallible>(response) + }; + tower::service_fn(handle).boxed_clone() + } + #[tokio::test] #[traced_test] async fn connection_manager_works() -> anyhow::Result<()> { let peer1 = Network::builder() .with_random_private_key() .with_service_name("tycho") - .build("127.0.0.1:0")?; + .build("127.0.0.1:0", echo_service())?; let peer2 = Network::builder() .with_random_private_key() .with_service_name("tycho") - .build("127.0.0.1:0")?; + .build("127.0.0.1:0", echo_service())?; let peer3 = Network::builder() .with_random_private_key() .with_service_name("not-tycho") - .build("127.0.0.1:0")?; + .build("127.0.0.1:0", echo_service())?; assert!(peer1.connect(peer2.local_addr()).await.is_ok()); assert!(peer2.connect(peer1.local_addr()).await.is_ok()); diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs new file mode 100644 index 000000000..1edd4b574 --- /dev/null +++ b/network/src/network/request_handler.rs @@ -0,0 +1,150 @@ +use std::convert::Infallible; +use std::sync::Arc; + +use anyhow::Result; +use bytes::Bytes; +use quinn::RecvStream; +use tokio::task::JoinSet; +use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec}; +use tower::util::{BoxCloneService, ServiceExt}; + +use crate::config::Config; +use crate::connection::{Connection, SendStream}; +use crate::types::{DisconnectReason, InboundRequestMeta, InboundServiceRequest, Response}; + +use super::connection_manager::ActivePeers; + +pub struct InboundRequestHandler { + config: Arc, + connection: Connection, + service: BoxCloneService, Response, Infallible>, + active_peers: ActivePeers, +} + +impl InboundRequestHandler { + 
pub fn new( + config: Arc, + connection: Connection, + service: BoxCloneService, Response, Infallible>, + active_peers: ActivePeers, + ) -> Self { + Self { + config, + connection, + service, + active_peers, + } + } + + pub async fn start(self) { + tracing::debug!(peer_id = %self.connection.peer_id(), "request handler started"); + + let mut inflight_requests = JoinSet::<()>::new(); + + let reason: quinn::ConnectionError = loop { + tokio::select! { + uni = self.connection.accept_uni() => match uni { + Ok(stream) => tracing::trace!(id = %stream.id(), "incoming uni stream"), + Err(e) => { + tracing::trace!("failed to accept an incoming uni stream: {e:?}"); + break e; + } + }, + bi = self.connection.accept_bi() => match bi { + Ok((tx, rx)) => { + tracing::trace!(id = %tx.id(), "incoming bi stream"); + let handler = BiStreamRequestHandler::new( + &self.config, + self.connection.request_meta().clone(), + self.service.clone(), + tx, + rx, + ); + inflight_requests.spawn(handler.handle()); + } + Err(e) => { + tracing::trace!("failed to accept an incoming bi stream: {e:?}"); + break e; + } + }, + datagram = self.connection.read_datagram() => match datagram { + Ok(datagram) => tracing::trace!(byte_len = datagram.len(), "incoming datagram"), + Err(e) => { + tracing::trace!("failed to read datagram: {e:?}"); + break e; + } + }, + Some(req) = inflight_requests.join_next() => match req { + Ok(()) => tracing::trace!("requrest handler task completed"), + Err(e) => { + if e.is_panic() { + std::panic::resume_unwind(e.into_panic()); + } else { + tracing::trace!("request handler task cancelled"); + } + } + } + } + }; + + self.active_peers.remove_with_stable_id( + self.connection.peer_id(), + self.connection.stable_id(), + DisconnectReason::from(reason), + ); + + inflight_requests.shutdown().await; + tracing::debug!(peer_id = %self.connection.peer_id(), "request handler stopped"); + } +} + +struct BiStreamRequestHandler { + meta: Arc, + service: BoxCloneService, Response, Infallible>, + send_stream: FramedWrite, + recv_stream: FramedRead, +} + +impl BiStreamRequestHandler { + fn new( + config: &Config, + meta: Arc, + service: BoxCloneService, Response, Infallible>, + send_stream: SendStream, + recv_stream: RecvStream, + ) -> Self { + Self { + meta, + service, + send_stream: FramedWrite::new(send_stream, crate::proto::make_codec(config)), + recv_stream: FramedRead::new(recv_stream, crate::proto::make_codec(config)), + } + } + + async fn handle(self) { + if let Err(e) = self.do_handle().await { + tracing::trace!("request handler task failed: {e:?}"); + } + } + + async fn do_handle(mut self) -> Result<()> { + let req = crate::proto::recv_request(&mut self.recv_stream).await?; + let res = { + let handler = self.service.oneshot(InboundServiceRequest { + metadata: self.meta, + body: req.body, + }); + + let stopped = self.send_stream.get_mut().stopped(); + tokio::select! 
{ + res = handler => res.expect("infallible always succeeds"), + _ = stopped => anyhow::bail!("send_stream closed by remote"), + } + }; + + crate::proto::send_response(&mut self.send_stream, res).await?; + self.send_stream.get_mut().finish().await?; + + Ok(()) + } +} diff --git a/network/src/proto.tl b/network/src/proto.tl index 2e6b5205e..11e714e55 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -6,32 +6,135 @@ int256 8*[ int ] = Int256; ---types--- -/** -* @param id hash of the TL repr of the full key (@see transport.id.Full) -*/ -transport.id.short id:int256 = transport.id.Short; - /** * @param key compressed ed25519 verifying key */ -transport.id.full.ed25519 key:int256 = transport.id.Full; +transport.peerId key:int256 = transport.PeerId; transport.address.ipv4 ip:int port:int = transport.Address; transport.address.ipv6 ip:int128 port:int = transport.Address; /** * @param items multiple possible addresses for the same peer -* @param version unix timestamp when the list was generated -* @param created_at unix timestamp when the peer was started -* @param expire_at unix timestamp up to which this list is valid +* @param created_at unix timestamp when the list was generated +* @param expires_at unix timestamp up to which this list is valid */ transport.addressList items:(vector transport.Address) - version:int created_at:int - expire_at:int + expires_at:int = transport.AddressList; -transport.message.notify data:bytes = transport.Message; -transport.message.query data:bytes = transport.Message; -transport.message.answer data:bytes = transport.Message; +// DHT +//////////////////////////////////////////////////////////////////////////////// + +---types--- + +/** +* @param id node public key +* @param addr_list list of possible peer addresses +* @param created_at unix timestamp when the entry was generated +* @param signature a ed25519 signature of the entry +*/ +dht.node + id:transport.PeerId + addr_list:transport.addressList + created_at:int + signature:bytes + = dht.Node; + +/** +* @param nodes list of DHT nodes +*/ +dht.nodes nodes:(vector dht.node) = dht.Nodes; + + +/** +* Key for the value that can only be updated by an owner +* +* @param name key name as UTF-8 string +* @param idx key index used for versioning +* @param peer_id owner id +* @param signature a ed25519 signature of this structure by the owner +*/ +dht.signedKey + name:bytes + idx:int + peer_id:transport.PeerId + signature:bytes + = dht.Key; + +/** +* Key for the overlay-managed value +* +* @param id overlay id +* @param name key name as UTF-8 string +* @param idx key index used for versioning +*/ +dht.overlayKey id:int256 name:bytes idx:int = dht.Key; + + +/** +* A value with an exact owner +* +* @param key signed key +* @param value any data +* @param expires_at unix timestamp up to which this value is valid +*/ +dht.signedValue key:dht.signedKey data:bytes expires_at:int signature:bytes = dht.Value; + +/** +* An overlay-managed value +* +* @param key overlay key +* @param value any data +* @param expires_at unix timestamp up to which this value is valid +*/ +dht.signedValue key:dht.overlayKey data:bytes expires_at:int = dht.Value; + + +/** +* A response for the `dht.store` query +*/ +dht.stored = dht.Stored; + + +/** +* A successful response for the `dht.findValue` query +* +* @param value an existing value +*/ +dht.valueFound value:dht.Value = dht.ValueResponse; +/** +* An unsuccessul response for the `dht.findValue` query +* +* @param value a list of nodes with the shortest distances +*/ 
+dht.valueNotFound nodes:dht.nodes = dht.ValueResponse; + +---functions--- + +/** +* Suggest a node to store that value +* +* @param value value to store +*/ +dht.store value:dht.value = dht.Stored; +/** +* Searches for k closest nodes +* +* @param peer_id peer_id to measure distances +* @param k max length of the result list +*/ +dht.findNode peer_id:transport.PeerId k:int = dht.Nodes; +/** +* Searches for a value by the hash of its key +* +* @param peer_id peer_id to measure distances +* @param k max length of the nodes list if it is not found +*/ +dht.findValue key:int256 k:int = dht.ValueResponse; +/** +* Requests a signed address list +*/ +dht.getNodeInfo = dht.Node; diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index 222a09887..b2a743329 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::net::SocketAddr; +use std::sync::Arc; pub use self::peer_id::*; @@ -42,6 +43,18 @@ pub struct Response { pub body: T, } +pub struct InboundServiceRequest { + pub metadata: Arc, + pub body: T, +} + +#[derive(Debug, Clone)] +pub struct InboundRequestMeta { + pub peer_id: PeerId, + pub origin: Direction, + pub remote_address: SocketAddr, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum PeerAffinity { High, @@ -75,7 +88,14 @@ pub enum DisconnectReason { } impl From for DisconnectReason { + #[inline] fn from(value: quinn::ConnectionError) -> Self { + Self::from(&value) + } +} + +impl From<&quinn::ConnectionError> for DisconnectReason { + fn from(value: &quinn::ConnectionError) -> Self { match value { quinn::ConnectionError::VersionMismatch => Self::VersionMismatch, quinn::ConnectionError::TransportError(_) => Self::TransportError, diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index dac90a8bf..c4ab64e15 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -7,6 +7,10 @@ impl PeerId { // SAFETY: `[u8; 32]` has the same layout as `PeerId`. 
unsafe { &*(bytes as *const [u8; 32] as *const Self) } } + + pub fn as_bytes(&self) -> &[u8; 32] { + &self.0 + } } impl std::fmt::Display for PeerId { From 62ba0cb5c134a84257feafaeacaebd46532d9cb8 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Thu, 30 Nov 2023 16:49:14 +0100 Subject: [PATCH 02/35] network: Wrap `std::net::SocketAddr` into a newtype --- network/src/connection.rs | 2 +- network/src/crypto.rs | 8 +- network/src/endpoint.rs | 10 +- network/src/network/connection_manager.rs | 29 ++-- network/src/network/mod.rs | 20 ++- network/src/proto.tl | 14 +- network/src/types/address.rs | 168 ++++++++++++++++++++++ network/src/types/mod.rs | 7 +- network/src/types/peer_id.rs | 7 +- 9 files changed, 224 insertions(+), 41 deletions(-) create mode 100644 network/src/types/address.rs diff --git a/network/src/connection.rs b/network/src/connection.rs index c6fd9b851..583eb7191 100644 --- a/network/src/connection.rs +++ b/network/src/connection.rs @@ -49,7 +49,7 @@ impl Connection { } pub fn close(&self) { - self.inner.close(0u8.into(), b"connection closed") + self.inner.close(0u8.into(), b"connection closed"); } pub async fn open_uni(&self) -> Result { diff --git a/network/src/crypto.rs b/network/src/crypto.rs index 3122f2b53..ea31db560 100644 --- a/network/src/crypto.rs +++ b/network/src/crypto.rs @@ -32,7 +32,7 @@ pub fn peer_id_from_certificate( use x509_parser::prelude::{FromDer, X509Certificate}; let (_, cert) = X509Certificate::from_der(certificate.0.as_ref()) - .map_err(|_| rustls::Error::InvalidCertificate(rustls::CertificateError::BadEncoding))?; + .map_err(|_e| rustls::Error::InvalidCertificate(rustls::CertificateError::BadEncoding))?; let spki = cert.public_key(); let public_key = ed25519::pkcs8::PublicKeyBytes::from_public_key_der(spki.raw).map_err(|e| { @@ -118,7 +118,8 @@ impl rustls::server::ClientCertVerifier for CertVerifier { ) -> Result { // Parse the certificate let prepared = prepare_for_self_signed(end_entity, intermediates)?; - let now = webpki::Time::try_from(now).map_err(|_| rustls::Error::FailedToGetCurrentTime)?; + let now = + webpki::Time::try_from(now).map_err(|_e| rustls::Error::FailedToGetCurrentTime)?; // Verify the certificate prepared @@ -173,7 +174,8 @@ impl rustls::client::ServerCertVerifier for CertVerifier { // Parse the certificate let prepared = prepare_for_self_signed(end_entity, intermediates)?; - let now = webpki::Time::try_from(now).map_err(|_| rustls::Error::FailedToGetCurrentTime)?; + let now = + webpki::Time::try_from(now).map_err(|_e| rustls::Error::FailedToGetCurrentTime)?; // Verify the certificate prepared diff --git a/network/src/endpoint.rs b/network/src/endpoint.rs index 556b2949e..925ff8062 100644 --- a/network/src/endpoint.rs +++ b/network/src/endpoint.rs @@ -9,7 +9,7 @@ use anyhow::Result; use crate::config::EndpointConfig; use crate::connection::Connection; -use crate::types::{Direction, PeerId}; +use crate::types::{Address, Direction, PeerId}; pub struct Endpoint { inner: quinn::Endpoint, @@ -73,14 +73,14 @@ impl Endpoint { } /// Connect to a remote endpoint using the endpoint configuration. - pub fn connect(&self, address: SocketAddr) -> Result { + pub fn connect(&self, address: Address) -> Result { self.connect_with_client_config(self.config.quinn_client_config.clone(), address) } /// Connect to a remote endpoint expecting it to have the provided peer id. 
pub fn connect_with_expected_id( &self, - address: SocketAddr, + address: Address, peer_id: PeerId, ) -> Result { let config = self.config.make_client_config_for_peer_id(peer_id)?; @@ -91,8 +91,10 @@ impl Endpoint { fn connect_with_client_config( &self, config: quinn::ClientConfig, - address: SocketAddr, + address: Address, ) -> Result { + let address = address.resolve()?; + self.inner .connect_with(config, address, &self.config.service_name) .map_err(Into::into) diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index 8d25eb24c..08115065d 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -1,5 +1,4 @@ use std::convert::Infallible; -use std::net::SocketAddr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Weak}; use std::time::{Duration, Instant}; @@ -15,15 +14,15 @@ use crate::config::Config; use crate::connection::Connection; use crate::endpoint::{Connecting, Endpoint}; use crate::types::{ - Direction, DisconnectReason, FastDashMap, FastHashMap, InboundServiceRequest, PeerAffinity, - PeerEvent, PeerId, PeerInfo, Response, + Address, Direction, DisconnectReason, FastDashMap, FastHashMap, InboundServiceRequest, + PeerAffinity, PeerEvent, PeerId, PeerInfo, Response, }; use super::request_handler::InboundRequestHandler; #[derive(Debug)] pub enum ConnectionManagerRequest { - Connect(SocketAddr, Option, oneshot::Sender>), + Connect(Address, Option, oneshot::Sender>), Shutdown(oneshot::Sender<()>), } @@ -47,7 +46,7 @@ pub struct ConnectionManager { impl Drop for ConnectionManager { fn drop(&mut self) { - self.endpoint.close() + self.endpoint.close(); } } @@ -192,8 +191,7 @@ impl ConnectionManager { && self .dial_backoff_states .get(&peer_info.peer_id) - .map(|state| now > state.next_attempt_at) - .unwrap_or(true) + .map_or(true, |state| now > state.next_attempt_at) }) .take(outstanding_connections_limit) .map(|item| item.value().clone()) @@ -208,11 +206,11 @@ impl ConnectionManager { fn handle_connect_request( &mut self, - address: SocketAddr, + address: Address, peer_id: Option, callback: oneshot::Sender>, ) { - self.dial_peer(address, peer_id, callback) + self.dial_peer(address, peer_id, callback); } fn handle_incoming(&mut self, connecting: Connecting) { @@ -314,13 +312,13 @@ impl ConnectionManager { #[tracing::instrument(level = "trace", skip_all, fields(peer_id = ?peer_id, address = %address))] fn dial_peer( &mut self, - address: SocketAddr, + address: Address, peer_id: Option, callback: oneshot::Sender>, ) { async fn dial_peer_task( connecting: Result, - address: SocketAddr, + address: Address, peer_id: Option, callback: oneshot::Sender>, config: Arc, @@ -343,13 +341,14 @@ impl ConnectionManager { } } + let target_address = address.clone(); let connecting = match peer_id { None => self.endpoint.connect(address), Some(peer_id) => self.endpoint.connect_with_expected_id(address, peer_id), }; self.pending_connections.spawn(dial_peer_task( connecting, - address, + target_address, peer_id, callback, self.config.clone(), @@ -360,7 +359,7 @@ impl ConnectionManager { struct ConnectingOutput { connecting_result: Result, callback: Option>>, - target_address: Option, + target_address: Option
, target_peer_id: Option, } @@ -411,7 +410,7 @@ impl ActivePeers { } pub fn remove(&self, peer_id: &PeerId, reason: DisconnectReason) { - self.0.remove(peer_id, reason) + self.0.remove(peer_id, reason); } pub fn remove_with_stable_id( @@ -420,7 +419,7 @@ impl ActivePeers { stable_id: usize, reason: DisconnectReason, ) { - self.0.remove_with_stable_id(peer_id, stable_id, reason) + self.0.remove_with_stable_id(peer_id, stable_id, reason); } pub fn is_empty(&self) -> bool { diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index 2a201f074..e4c748f4e 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -10,7 +10,7 @@ use tower::ServiceExt; use crate::config::{Config, EndpointConfig}; use crate::endpoint::Endpoint; -use crate::types::{DisconnectReason, InboundServiceRequest, PeerId, Response}; +use crate::types::{Address, DisconnectReason, InboundServiceRequest, PeerId, Response}; use self::connection_manager::{ ActivePeers, ConnectionManager, ConnectionManagerRequest, KnownPeers, WeakActivePeers, @@ -174,12 +174,18 @@ impl Network { self.0.known_peers() } - pub async fn connect(&self, addr: SocketAddr) -> Result { - self.0.connect(addr, None).await + pub async fn connect(&self, addr: T) -> Result + where + T: Into
, + { + self.0.connect(addr.into(), None).await } - pub async fn connect_with_peer_id(&self, addr: SocketAddr, peer_id: &PeerId) -> Result { - self.0.connect(addr, Some(peer_id)).await + pub async fn connect_with_peer_id(&self, addr: T, peer_id: &PeerId) -> Result + where + T: Into
, + { + self.0.connect(addr.into(), Some(peer_id)).await } pub fn disconnect(&self, peer_id: &PeerId) -> Result<()> { @@ -208,7 +214,7 @@ impl NetworkInner { &self.known_peers } - async fn connect(&self, addr: SocketAddr, peer_id: Option<&PeerId>) -> Result { + async fn connect(&self, addr: Address, peer_id: Option<&PeerId>) -> Result { let (tx, rx) = oneshot::channel(); self.connection_manager_handle .send(ConnectionManagerRequest::Connect( @@ -217,7 +223,7 @@ impl NetworkInner { tx, )) .await - .map_err(|_| anyhow::anyhow!("network has been shutdown"))?; + .map_err(|_e| anyhow::anyhow!("network has been shutdown"))?; rx.await? } diff --git a/network/src/proto.tl b/network/src/proto.tl index 11e714e55..c4add1e84 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -90,7 +90,7 @@ dht.signedValue key:dht.signedKey data:bytes expires_at:int signature:bytes = dh * @param value any data * @param expires_at unix timestamp up to which this value is valid */ -dht.signedValue key:dht.overlayKey data:bytes expires_at:int = dht.Value; +dht.overlayValue key:dht.overlayKey data:bytes expires_at:int = dht.Value; /** @@ -123,15 +123,15 @@ dht.store value:dht.value = dht.Stored; /** * Searches for k closest nodes * -* @param peer_id peer_id to measure distances -* @param k max length of the result list +* @param key key hash +* @param k max length of the result list */ -dht.findNode peer_id:transport.PeerId k:int = dht.Nodes; +dht.findNode key:int256 k:int = dht.Nodes; /** -* Searches for a value by the hash of its key +* Searches for a value if stored or k closest nodes * -* @param peer_id peer_id to measure distances -* @param k max length of the nodes list if it is not found +* @param key key hash +* @param k max length of the nodes list if it is not found */ dht.findValue key:int256 k:int = dht.ValueResponse; /** diff --git a/network/src/types/address.rs b/network/src/types/address.rs new file mode 100644 index 000000000..cde43e124 --- /dev/null +++ b/network/src/types/address.rs @@ -0,0 +1,168 @@ +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::str::FromStr; + +use tl_proto::{TlRead, TlWrite}; + +#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct Address(SocketAddr); + +impl Address { + pub fn resolve(&self) -> std::io::Result { + std::net::ToSocketAddrs::to_socket_addrs(&self).and_then(|mut iter| { + iter.next().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::NotFound, "unable to resolve host") + }) + }) + } +} + +impl std::fmt::Display for Address { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(&self.0, f) + } +} + +impl std::net::ToSocketAddrs for Address { + type Iter = ::Iter; + + fn to_socket_addrs(&self) -> std::io::Result { + self.0.to_socket_addrs() + } +} + +impl TlWrite for Address { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + 4 + match &self.0 { + SocketAddr::V4(_) => 4 + 4, + SocketAddr::V6(_) => 16 + 4, + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + match &self.0 { + SocketAddr::V4(addr) => { + packet.write_u32(ADDRESS_V4_TL_ID); + packet.write_u32(u32::from(*addr.ip())); + packet.write_u32(addr.port() as u32); + } + SocketAddr::V6(addr) => { + packet.write_u32(ADDRESS_V6_TL_ID); + packet.write_raw_slice(&addr.ip().octets()); + packet.write_u32(addr.port() as u32); + } + }; + } +} + +impl<'a> TlRead<'a> for Address { + type Repr = tl_proto::Boxed; + + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + use tl_proto::TlError; + + Ok(Address(match u32::read_from(packet, offset)? { + ADDRESS_V4_TL_ID => { + let ip = u32::read_from(packet, offset)?; + let Ok(port) = u32::read_from(packet, offset)?.try_into() else { + return Err(TlError::InvalidData); + }; + SocketAddr::V4(SocketAddrV4::new(ip.into(), port)) + } + ADDRESS_V6_TL_ID => { + let octets = <[u8; 16]>::read_from(packet, offset)?; + let Ok(port) = u32::read_from(packet, offset)?.try_into() else { + return Err(TlError::InvalidData); + }; + SocketAddr::V6(SocketAddrV6::new(octets.into(), port, 0, 0)) + } + _ => return Err(TlError::UnknownConstructor), + })) + } +} + +impl From for Address { + #[inline] + fn from(value: SocketAddr) -> Self { + Self(value) + } +} + +impl From for Address { + #[inline] + fn from(value: SocketAddrV4) -> Self { + Self(SocketAddr::V4(value)) + } +} + +impl From for Address { + #[inline] + fn from(value: SocketAddrV6) -> Self { + Self(SocketAddr::V6(value)) + } +} + +impl From<(std::net::Ipv4Addr, u16)> for Address { + #[inline] + fn from((ip, port): (std::net::Ipv4Addr, u16)) -> Self { + Self(SocketAddr::V4(SocketAddrV4::new(ip, port))) + } +} + +impl From<(std::net::Ipv6Addr, u16)> for Address { + #[inline] + fn from((ip, port): (std::net::Ipv6Addr, u16)) -> Self { + Self(SocketAddr::V6(SocketAddrV6::new(ip, port, 0, 0))) + } +} + +impl FromStr for Address { + type Err = std::net::AddrParseError; + + #[inline] + fn from_str(s: &str) -> Result { + SocketAddr::from_str(s).map(Self) + } +} + +#[derive(Debug, Clone, TlWrite, Eq, PartialEq)] +pub struct AddressList { + pub items: Vec
<Address>
, + pub created_at: u32, + pub expires_at: u32, +} + +impl AddressList { + pub const MAX_LEN: usize = 4; +} + +impl<'a> TlRead<'a> for AddressList { + type Repr = tl_proto::Bare; + + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + use tl_proto::TlError; + + let len = u32::read_from(packet, offset)? as usize; + if len > Self::MAX_LEN { + return Err(TlError::InvalidData); + } + + let mut items = Vec::with_capacity(len); + for _ in 0..len { + items.push(Address::read_from(packet, offset)?); + } + + Ok(Self { + items, + created_at: u32::read_from(packet, offset)?, + expires_at: u32::read_from(packet, offset)?, + }) + } +} + +const ADDRESS_V4_TL_ID: u32 = tl_proto::id!("transport.address.ipv4", scheme = "proto.tl"); +const ADDRESS_V6_TL_ID: u32 = tl_proto::id!("transport.address.ipv6", scheme = "proto.tl"); diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index b2a743329..d99c16118 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -2,8 +2,10 @@ use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; -pub use self::peer_id::*; +pub use self::address::{Address, AddressList}; +pub use self::peer_id::{Direction, PeerId}; +mod address; mod peer_id; pub type FastDashMap = dashmap::DashMap; @@ -66,7 +68,8 @@ pub enum PeerAffinity { pub struct PeerInfo { pub peer_id: PeerId, pub affinity: PeerAffinity, - pub address: SocketAddr, + // TODO: change to address list + pub address: Address, } #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index c4ab64e15..18fa54fae 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -1,11 +1,14 @@ -#[derive(Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)] +use tl_proto::{TlRead, TlWrite}; + +#[derive(Clone, Copy, TlRead, TlWrite, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[tl(boxed, id = "transport.peerId", scheme = "proto.tl")] #[repr(transparent)] pub struct PeerId(pub [u8; 32]); impl PeerId { pub fn wrap(bytes: &[u8; 32]) -> &Self { // SAFETY: `[u8; 32]` has the same layout as `PeerId`. 
- unsafe { &*(bytes as *const [u8; 32] as *const Self) } + unsafe { &*(bytes as *const [u8; 32]).cast::() } } pub fn as_bytes(&self) -> &[u8; 32] { From 4a9a9dda8dc0bcc0f198e32bc3356e353f7f8c7d Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Wed, 6 Dec 2023 17:03:51 +0100 Subject: [PATCH 03/35] Add dht tl models --- Cargo.lock | 472 ++++++++++++++++++++++++++--------- network/Cargo.toml | 3 +- network/src/dht/mod.rs | 2 + network/src/dht/proto.rs | 212 ++++++++++++++++ network/src/dht/storage.rs | 7 + network/src/lib.rs | 1 + network/src/proto.tl | 22 +- network/src/types/mod.rs | 2 + network/src/types/peer_id.rs | 12 + network/src/types/rpc.rs | 5 + 10 files changed, 611 insertions(+), 127 deletions(-) create mode 100644 network/src/dht/mod.rs create mode 100644 network/src/dht/proto.rs create mode 100644 network/src/dht/storage.rs create mode 100644 network/src/types/rpc.rs diff --git a/Cargo.lock b/Cargo.lock index a6b760118..6c41e2164 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -123,6 +123,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + [[package]] name = "block-buffer" version = "0.10.4" @@ -138,12 +144,49 @@ version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +[[package]] +name = "bytecount" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" + [[package]] name = "bytes" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +[[package]] +name = "camino" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", +] + [[package]] name = "cc" version = "1.0.83" @@ -165,22 +208,6 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" -[[package]] -name = "core-foundation" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" - [[package]] name = "cpufeatures" version = "0.2.11" @@ -205,6 +232,25 @@ version = "2.4.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crossbeam-channel" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -288,9 +334,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" dependencies = [ "powerfmt", ] @@ -340,6 +386,31 @@ dependencies = [ "zeroize", ] +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + [[package]] name = "fiat-crypto" version = "0.2.5" @@ -413,15 +484,21 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "hashbrown" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "hermit-abi" @@ -443,9 +520,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] @@ -462,6 +539,12 @@ version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +[[package]] +name = "linux-raw-sys" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" + [[package]] name = "lock_api" version = "0.4.11" @@ 
-493,6 +576,21 @@ version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +[[package]] +name = "mini-moka" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23e0b72e7c9042467008b10279fc732326bd605459ae03bda88825909dd19b56" +dependencies = [ + "crossbeam-channel", + "crossbeam-utils", + "dashmap", + "skeptic", + "smallvec", + "tagptr", + "triomphe", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -510,13 +608,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -603,12 +701,6 @@ version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - [[package]] name = "overload" version = "0.1.1" @@ -625,7 +717,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -745,13 +837,24 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] +[[package]] +name = "pulldown-cmark" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +dependencies = [ + "bitflags 1.3.2", + "memchr", + "unicase", +] + [[package]] name = "quinn" version = "0.10.2" @@ -780,7 +883,6 @@ dependencies = [ "ring 0.16.20", "rustc-hash", "rustls", - "rustls-native-certs", "slab", "thiserror", "tinyvec", @@ -797,7 +899,7 @@ dependencies = [ "libc", "socket2", "tracing", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -857,7 +959,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -921,16 +1023,16 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.5" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -963,6 +1065,19 @@ dependencies = [ "nom", ] +[[package]] +name = "rustix" +version = "0.38.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" +dependencies = [ + "bitflags 2.4.1", + "errno", 
+ "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + [[package]] name = "rustls" version = "0.21.9" @@ -970,49 +1085,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", - "ring 0.17.5", + "ring 0.17.7", "rustls-webpki", "sct", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64", -] - [[package]] name = "rustls-webpki" version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.5", + "ring 0.17.7", "untrusted 0.9.0", ] [[package]] -name = "schannel" -version = "0.1.22" +name = "ryu" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "windows-sys", + "winapi-util", ] [[package]] @@ -1027,38 +1127,18 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.5", + "ring 0.17.7", "untrusted 0.9.0", ] -[[package]] -name = "security-framework" -version = "2.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +dependencies = [ + "serde", +] [[package]] name = "serde" @@ -1080,6 +1160,17 @@ dependencies = [ "syn 2.0.39", ] +[[package]] +name = "serde_json" +version = "1.0.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +dependencies = [ + "itoa", + "ryu", + "serde", +] + [[package]] name = "sha2" version = "0.10.8" @@ -1109,6 +1200,21 @@ dependencies = [ "rand_core", ] +[[package]] +name = "skeptic" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata", + "error-chain", + "glob", + "pulldown-cmark", + 
"tempfile", + "walkdir", +] + [[package]] name = "slab" version = "0.4.9" @@ -1131,7 +1237,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1148,9 +1254,9 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", @@ -1196,6 +1302,25 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + +[[package]] +name = "tempfile" +version = "3.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +dependencies = [ + "cfg-if", + "fastrand", + "redox_syscall", + "rustix", + "windows-sys 0.48.0", +] + [[package]] name = "thiserror" version = "1.0.50" @@ -1272,9 +1397,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tl-proto" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3b51063c4076ddf3c068738d65dca0946937894ac8139508b6693dd3414c3f3" +checksum = "3418163db528cc2324ed7bc9d52aa3ca7a8f73d685f8b21b319d2a08ee4b36d3" dependencies = [ "bytes", "digest", @@ -1324,7 +1449,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1464,6 +1589,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "triomphe" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" + [[package]] name = "tycho-consensus" version = "0.0.1" @@ -1494,6 +1625,7 @@ dependencies = [ "ed25519-dalek", "futures-util", "hex", + "mini-moka", "pin-project-lite", "pkcs8", "quinn", @@ -1548,6 +1680,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +[[package]] +name = "unicase" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-ident" version = "1.0.12" @@ -1584,6 +1725,16 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "walkdir" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -1592,9 +1743,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.88" +version = "0.2.89" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -1602,9 +1753,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", @@ -1617,9 +1768,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1627,9 +1778,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", @@ -1640,15 +1791,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "web-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" dependencies = [ "js-sys", "wasm-bindgen", @@ -1670,6 +1821,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -1682,7 +1842,16 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", ] [[package]] @@ -1691,13 +1860,28 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 
0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] @@ -1706,42 +1890,84 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "x509-parser" version = 
"0.15.1" @@ -1770,18 +1996,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.26" +version = "0.7.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0" +checksum = "5d075cf85bbb114e933343e087b92f2146bac0d55b534cbb8188becf0039948e" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.26" +version = "0.7.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f" +checksum = "86cd5ca076997b97ef09d3ad65efe811fa68c9e874cb636ccb211223a813b0c2" dependencies = [ "proc-macro2", "quote", diff --git a/network/Cargo.toml b/network/Cargo.toml index 4b8b01c2b..1e26485e3 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -15,9 +15,10 @@ ed25519 = { version = "2.0", features = ["alloc", "pkcs8"] } ed25519-dalek = { version = "2.0" } futures-util = { version = "0.3", features = ["sink"] } hex = "0.4" +mini-moka = "0.10" pin-project-lite = "0.2" pkcs8 = "0.10" -quinn = { version = "0.10", features = ["runtime-tokio", "tls-rustls"] } +quinn = { version = "0.10", default-features = false, features = ["runtime-tokio", "tls-rustls"] } rand = "0.8" rcgen = "0.11" ring = "0.16" diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs new file mode 100644 index 000000000..6dbda82dc --- /dev/null +++ b/network/src/dht/mod.rs @@ -0,0 +1,2 @@ +pub mod proto; +pub mod storage; diff --git a/network/src/dht/proto.rs b/network/src/dht/proto.rs new file mode 100644 index 000000000..70b54a2cc --- /dev/null +++ b/network/src/dht/proto.rs @@ -0,0 +1,212 @@ +use bytes::Bytes; +use tl_proto::{TlRead, TlWrite}; + +use crate::types::{AddressList, PeerId}; + +/// A signed DHT node info. +#[derive(Debug, Clone, TlRead, TlWrite)] +pub struct NodeInfo { + /// Node public key. + pub id: PeerId, + /// A list of possible peer addresses. + pub address_list: AddressList, + /// Unix timestamp when the entry was generated. + pub created_at: u32, + /// A `ed25519` signature of this entry. + #[tl(signature)] + pub signature: Bytes, +} + +/// Key for values that can only be updated by the owner. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, id = "dht.signedKey", scheme = "proto.tl")] +pub struct SignedKey { + /// Key name. + pub name: Bytes, + /// Key index (version). + pub idx: u32, + /// Public key of the owner. + pub peer_id: PeerId, + /// A `ed25519` signature of this entry. + #[tl(signature)] + pub signature: Bytes, +} + +/// Key for overlay-managed values. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, id = "dht.overlayKey", scheme = "proto.tl")] +pub struct OverlayKey { + /// Overlay id. + pub id: [u8; 32], + /// Key name. + pub name: Bytes, + /// Key index (version). + pub idx: u32, +} + +/// Value with a known owner. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, id = "dht.signedValue", scheme = "proto.tl")] +pub struct SignedValue { + /// Signed key. + pub key: SignedKey, + /// Any data. + pub data: Bytes, + /// Unix timestamp up to which this value is valid. + pub expires_at: u32, + /// A `ed25519` signature of this entry. + #[tl(signature)] + pub signature: Bytes, +} + +/// Overlay-managed value. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, id = "dht.overlayValue", scheme = "proto.tl")] +pub struct OverlayValue { + /// Overlay key. + pub key: OverlayKey, + /// Any data. + pub data: Bytes, + /// Unix timestamp up to which this value is valid. 
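Note that both value kinds carry an absolute `expires_at` unix timestamp rather than a relative TTL. A minimal sketch of the liveness check this implies, in the spirit of the storage layer added later in this series (the `now_sec` argument stands in for a clock helper such as the `tycho_util::time::now_sec` introduced there):

/// Remaining lifetime of a value in seconds, or `None` if it is already expired.
/// A value whose `expires_at` equals the current time counts as expired,
/// matching the `Some(0) | None` check used by the DHT storage below.
fn remaining_ttl(expires_at: u32, now_sec: u32) -> Option<u32> {
    match expires_at.checked_sub(now_sec) {
        Some(0) | None => None,
        Some(remaining) => Some(remaining),
    }
}
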
+ pub expires_at: u32, +} + +/// Stored value. +#[derive(Debug, Clone)] +pub enum Value { + /// Value with a known owner. + Signed(SignedValue), + /// Overlay-managed value. + Overlay(OverlayValue), +} + +impl TlWrite for Value { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + match self { + Self::Signed(value) => value.max_size_hint(), + Self::Overlay(value) => value.max_size_hint(), + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + match self { + Self::Signed(value) => value.write_to(packet), + Self::Overlay(value) => value.write_to(packet), + } + } +} + +impl<'a> TlRead<'a> for Value { + type Repr = tl_proto::Boxed; + + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + let id = u32::read_from(packet, offset)?; + *offset -= 4; + match id { + SignedValue::TL_ID => SignedValue::read_from(packet, offset).map(Self::Signed), + OverlayValue::TL_ID => OverlayValue::read_from(packet, offset).map(Self::Overlay), + _ => Err(tl_proto::TlError::UnknownConstructor), + } + } +} + +/// A response for the [`rpc::Store`] query. +#[derive(Debug, Clone, Copy, TlRead, TlWrite)] +#[tl(boxed, id = "dht.stored", scheme = "proto.tl")] +pub struct Stored; + +/// A response for the [`rpc::FindNode`] query. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, id = "dht.nodesFound", scheme = "proto.tl")] +pub struct NodeResponse { + /// List of nodes closest to the key. + pub nodes: Vec, +} + +/// A response for the [`rpc::FindValue`] query. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, scheme = "proto.tl")] +pub enum ValueResponse { + /// An existing value for the specified key. + #[tl(id = "dht.valueFound")] + Found(Value), + /// List of nodes closest to the key. + #[tl(id = "dht.valueNotFound")] + NotFound(Vec), +} + +/// A response for the [`rpc::GetNodeInfo`] query. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, id = "dht.nodeInfoFound", scheme = "proto.tl")] +pub struct NodeInfoResponse { + /// Signed node info. + pub info: NodeInfo, +} + +pub mod rpc { + use crate::types::RpcQuery; + + use super::*; + + /// Suggest a node to store that value. + /// + /// See [`Stored`]. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.store", scheme = "proto.tl")] + pub struct Store { + /// A value to store. + pub value: Value, + } + + impl RpcQuery for Store { + type Response = Stored; + } + + /// Search for `k` closest nodes. + /// + /// See [`NodeResponse`]. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.findNode", scheme = "proto.tl")] + pub struct FindNode { + /// Key hash. + pub key: [u8; 32], + /// Maximum number of nodes to return. + pub k: u32, + } + + impl RpcQuery for FindNode { + type Response = NodeResponse; + } + + /// Search for a value if stored or `k` closest nodes. + /// + /// See [`ValueResponse`]. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.findValue", scheme = "proto.tl")] + pub struct FindValue { + /// Key hash. + pub key: [u8; 32], + /// Maximum number of nodes to return. + pub k: u32, + } + + impl RpcQuery for FindValue { + type Response = ValueResponse; + } + + /// Requests a signed address list from the node. + /// + /// See [`NodeInfoResponse`]. 
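Each query type here is tied to its response type through the `RpcQuery` trait (defined in `types/rpc.rs` later in this patch). A minimal sketch of how a caller could rely on that association; `send_raw` is a hypothetical transport closure used only for illustration, not part of the patch:

use tl_proto::TlWrite;

use crate::types::RpcQuery;

/// Sends a TL query and decodes its typed response.
fn send_query<Q, F>(send_raw: F, query: &Q) -> tl_proto::TlResult<Q::Response>
where
    Q: RpcQuery + TlWrite<Repr = tl_proto::Boxed>,
    F: FnOnce(Vec<u8>) -> Vec<u8>,
{
    // Serialize the boxed query constructor...
    let body = tl_proto::serialize(query);
    // ...exchange bytes with the remote peer...
    let response = send_raw(body);
    // ...and decode the response type associated with this query.
    tl_proto::deserialize::<Q::Response>(&response)
}
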
+ #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.getNodeInfo", scheme = "proto.tl")] + pub struct GetNodeInfo; + + impl RpcQuery for GetNodeInfo { + type Response = NodeInfoResponse; + } +} diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs new file mode 100644 index 000000000..d6a98fdc3 --- /dev/null +++ b/network/src/dht/storage.rs @@ -0,0 +1,7 @@ +use mini_moka::sync::Cache; + +pub struct Storage { + _storage: Cache, +} + +pub type StorageKeyId = [u8; 32]; diff --git a/network/src/lib.rs b/network/src/lib.rs index 1e604f0a3..f7ed02019 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,6 +1,7 @@ pub mod config; pub mod connection; pub mod crypto; +pub mod dht; pub mod endpoint; pub mod network; pub mod proto; diff --git a/network/src/proto.tl b/network/src/proto.tl index c4add1e84..da780075f 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -99,6 +99,14 @@ dht.overlayValue key:dht.overlayKey data:bytes expires_at:int = dht.Value; dht.stored = dht.Stored; +/** +* A response for the `dht.findNode` query +* +* @param value a list of nodes with the shortest distances +*/ +dht.nodesFound nodes:dht.nodes = dht.NodeResponse; + + /** * A successful response for the `dht.findValue` query * @@ -112,6 +120,14 @@ dht.valueFound value:dht.Value = dht.ValueResponse; */ dht.valueNotFound nodes:dht.nodes = dht.ValueResponse; + +/* +* A response for the `dht.getNodeInfo` query +* +* @param info a signed node info +*/ +dht.nodeInfoFound info:dht.node = dht.NodeInfoResponse; + ---functions--- /** @@ -119,14 +135,14 @@ dht.valueNotFound nodes:dht.nodes = dht.ValueResponse; * * @param value value to store */ -dht.store value:dht.value = dht.Stored; +dht.store value:dht.Value = dht.Stored; /** * Searches for k closest nodes * * @param key key hash * @param k max length of the result list */ -dht.findNode key:int256 k:int = dht.Nodes; +dht.findNode key:int256 k:int = dht.NodeResponse; /** * Searches for a value if stored or k closest nodes * @@ -137,4 +153,4 @@ dht.findValue key:int256 k:int = dht.ValueResponse; /** * Requests a signed address list */ -dht.getNodeInfo = dht.Node; +dht.getNodeInfo = dht.NodeInfoResponse; diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index d99c16118..9f8fbac2b 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -4,9 +4,11 @@ use std::sync::Arc; pub use self::address::{Address, AddressList}; pub use self::peer_id::{Direction, PeerId}; +pub use self::rpc::RpcQuery; mod address; mod peer_id; +mod rpc; pub type FastDashMap = dashmap::DashMap; pub type FastHashMap = HashMap; diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index 18fa54fae..38abe4682 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -16,6 +16,18 @@ impl PeerId { } } +impl<'a> TlRead<'a> for &'a PeerId { + type Repr = tl_proto::Boxed; + + #[inline] + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + if u32::read_from(packet, offset)? 
!= PeerId::TL_ID { + return Err(tl_proto::TlError::UnknownConstructor); + } + <_>::read_from(packet, offset).map(PeerId::wrap) + } +} + impl std::fmt::Display for PeerId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let len = f.precision().unwrap_or(32); diff --git a/network/src/types/rpc.rs b/network/src/types/rpc.rs new file mode 100644 index 000000000..5dc2b062a --- /dev/null +++ b/network/src/types/rpc.rs @@ -0,0 +1,5 @@ +use tl_proto::{Boxed, TlRead}; + +pub trait RpcQuery { + type Response: for<'a> TlRead<'a, Repr = Boxed>; +} From 0958279df6e22dbc53f44a695329622f388990f8 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 11 Dec 2023 18:55:03 +0100 Subject: [PATCH 04/35] Add dht storage --- Cargo.lock | 134 ++++++++++++++++++----- network/Cargo.toml | 4 +- network/src/dht/proto.rs | 26 ++++- network/src/dht/storage.rs | 199 ++++++++++++++++++++++++++++++++++- network/src/proto.tl | 2 - network/src/types/peer_id.rs | 5 + util/src/lib.rs | 2 +- util/src/time.rs | 6 ++ 8 files changed, 341 insertions(+), 37 deletions(-) create mode 100644 util/src/time.rs diff --git a/Cargo.lock b/Cargo.lock index 6c41e2164..2d0a9fcb1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -242,6 +242,19 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-epoch" +version = "0.9.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + [[package]] name = "crossbeam-utils" version = "0.8.16" @@ -270,7 +283,6 @@ dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", - "digest", "fiat-crypto", "platforms", "rustc_version", @@ -372,20 +384,6 @@ dependencies = [ "signature", ] -[[package]] -name = "ed25519-dalek" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" -dependencies = [ - "curve25519-dalek", - "ed25519", - "serde", - "sha2", - "subtle", - "zeroize", -] - [[package]] name = "errno" version = "0.3.8" @@ -405,6 +403,20 @@ dependencies = [ "version_check", ] +[[package]] +name = "everscale-crypto" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3b3e4fc7882223c86a7cfd8ccdb58e017b89a9f91d90114beafa0e8d35b45fb" +dependencies = [ + "curve25519-dalek", + "generic-array", + "hex", + "rand", + "sha2", + "tl-proto", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -561,6 +573,15 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +[[package]] +name = "mach2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" +dependencies = [ + "libc", +] + [[package]] name = "matchers" version = "0.1.0" @@ -577,18 +598,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] -name = "mini-moka" -version = "0.10.2" +name = "memoffset" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e0b72e7c9042467008b10279fc732326bd605459ae03bda88825909dd19b56" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ - 
"crossbeam-channel", - "crossbeam-utils", - "dashmap", - "skeptic", - "smallvec", - "tagptr", - "triomphe", + "autocfg", ] [[package]] @@ -617,6 +632,27 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "moka" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8017ec3548ffe7d4cef7ac0e12b044c01164a74c0f3119420faeaf13490ad8b" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "once_cell", + "parking_lot", + "quanta", + "rustc_version", + "skeptic", + "smallvec", + "tagptr", + "thiserror", + "triomphe", + "uuid", +] + [[package]] name = "nom" version = "7.1.3" @@ -707,6 +743,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + [[package]] name = "parking_lot_core" version = "0.9.9" @@ -855,6 +901,22 @@ dependencies = [ "unicase", ] +[[package]] +name = "quanta" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +dependencies = [ + "crossbeam-utils", + "libc", + "mach2", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + [[package]] name = "quinn" version = "0.10.2" @@ -941,6 +1003,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "raw-cpuid" +version = "10.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "rcgen" version = "0.11.3" @@ -1622,10 +1693,10 @@ dependencies = [ "bytes", "dashmap", "ed25519", - "ed25519-dalek", + "everscale-crypto", "futures-util", "hex", - "mini-moka", + "moka", "pin-project-lite", "pkcs8", "quinn", @@ -1713,6 +1784,15 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "uuid" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +dependencies = [ + "getrandom", +] + [[package]] name = "valuable" version = "0.1.0" diff --git a/network/Cargo.toml b/network/Cargo.toml index 1e26485e3..00c31f74a 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -12,10 +12,10 @@ anyhow = "1.0" bytes = "1.0" dashmap = "5.4" ed25519 = { version = "2.0", features = ["alloc", "pkcs8"] } -ed25519-dalek = { version = "2.0" } +everscale-crypto = { version = "0.2", features = ["tl-proto"] } futures-util = { version = "0.3", features = ["sink"] } hex = "0.4" -mini-moka = "0.10" +moka = { version = "0.12", features = ["sync"] } pin-project-lite = "0.2" pkcs8 = "0.10" quinn = { version = "0.10", default-features = false, features = ["runtime-tokio", "tls-rustls"] } diff --git a/network/src/dht/proto.rs b/network/src/dht/proto.rs index 70b54a2cc..c200cef26 100644 --- a/network/src/dht/proto.rs +++ b/network/src/dht/proto.rs @@ -27,9 +27,6 @@ pub struct SignedKey { pub idx: u32, /// Public key of the owner. pub peer_id: PeerId, - /// A `ed25519` signature of this entry. 
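From this patch on, only `dht.signedValue` itself is signed; the key no longer carries a signature of its own. The check the storage performs for such values (shown in full in `storage.rs` below) can be condensed into the following sketch, using the `PeerId::as_public_key` helper added in this patch; the function name is illustrative:

use crate::dht::proto::SignedValue;

/// Returns `true` if `value.signature` is a valid ed25519 signature made by
/// the peer named in `value.key.peer_id` over the TL representation of the value.
fn is_signed_value_valid(value: &SignedValue) -> bool {
    let Some(public_key) = value.key.peer_id.as_public_key() else {
        return false;
    };
    match <&[u8; 64]>::try_from(value.signature.as_ref()) {
        Ok(signature) => public_key.verify(value, signature),
        Err(_) => false,
    }
}
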
- #[tl(signature)] - pub signature: Bytes, } /// Key for overlay-managed values. @@ -80,6 +77,29 @@ pub enum Value { Overlay(OverlayValue), } +impl Value { + pub fn key_name(&self) -> &[u8] { + match self { + Self::Signed(value) => value.key.name.as_ref(), + Self::Overlay(value) => value.key.name.as_ref(), + } + } + + pub const fn key_index(&self) -> u32 { + match self { + Self::Signed(value) => value.key.idx, + Self::Overlay(value) => value.key.idx, + } + } + + pub const fn expires_at(&self) -> u32 { + match self { + Self::Signed(value) => value.expires_at, + Self::Overlay(value) => value.expires_at, + } + } +} + impl TlWrite for Value { type Repr = tl_proto::Boxed; diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index d6a98fdc3..5065e200b 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs @@ -1,7 +1,202 @@ -use mini_moka::sync::Cache; +use std::time::Duration; + +use anyhow::Result; +use bytes::{Bytes, BytesMut}; +use moka::sync::{Cache, CacheBuilder}; +use moka::Expiry; +use tl_proto::TlWrite; +use tycho_util::time::*; + +use crate::dht::proto; + +type DhtCache = Cache; +type DhtCacheBuilder = CacheBuilder>; + +pub struct Builder { + cache_builder: DhtCacheBuilder, + max_ttl: Duration, + max_key_name_len: usize, + max_key_index: u32, + // TODO: add a hashset for allowed keys (maybe separate signed keys from overlay keys) +} + +impl Default for Builder { + fn default() -> Self { + Self { + cache_builder: Default::default(), + max_ttl: Duration::from_secs(3600), + max_key_name_len: 128, + max_key_index: 4, + } + } +} + +impl Builder { + pub fn build(self) -> Storage { + fn weigher(_key: &StorageKeyId, value: &StoredValue) -> u32 { + 32 + 4 + value.data.len() as u32 + } + + Storage { + cache: self + .cache_builder + .time_to_live(self.max_ttl) + .weigher(weigher) + .expire_after(ValueExpiry) + .build_with_hasher(ahash::RandomState::default()), + max_ttl_sec: self.max_ttl.as_secs().try_into().unwrap_or(u32::MAX), + max_key_name_len: self.max_key_name_len, + max_key_index: self.max_key_index, + } + } + + pub fn with_max_key_name_len(mut self, len: usize) -> Self { + self.max_key_name_len = len; + self + } + + pub fn with_max_key_index(mut self, index: u32) -> Self { + self.max_key_index = index; + self + } + + pub fn with_max_capacity(mut self, max_capacity: u64) -> Self { + self.cache_builder = self.cache_builder.max_capacity(max_capacity); + self + } + + pub fn with_max_ttl(mut self, ttl: Duration) -> Self { + self.max_ttl = ttl; + self + } + + pub fn with_max_idle(mut self, duration: Duration) -> Self { + self.cache_builder = self.cache_builder.time_to_idle(duration); + self + } +} pub struct Storage { - _storage: Cache, + cache: DhtCache, + max_ttl_sec: u32, + max_key_name_len: usize, + max_key_index: u32, +} + +impl Storage { + pub fn builder() -> Builder { + Builder::default() + } + + pub fn insert(&self, value: &proto::Value) -> Result { + match value.expires_at().checked_sub(now_sec()) { + Some(0) | None => return Err(StorageError::ValueExpired), + Some(remaining_ttl) if remaining_ttl > self.max_ttl_sec => { + return Err(StorageError::UnsupportedTtl) + } + _ => {} + } + + if !(0..=self.max_key_name_len).contains(&value.key_name().len()) + || value.key_index() > self.max_key_index + { + return Err(StorageError::InvalidKey); + } + + match value { + proto::Value::Signed(value) => self.insert_signed_value(value), + proto::Value::Overlay(value) => self.insert_overlay_value(value), + } + } + + fn insert_signed_value(&self, value: 
&proto::SignedValue) -> Result { + let Some(public_key) = value.key.peer_id.as_public_key() else { + return Err(StorageError::InvalidSignature); + }; + + if !matches!( + <&[u8; 64]>::try_from(value.signature.as_ref()), + Ok(signature) if public_key.verify(value, signature) + ) { + return Err(StorageError::InvalidSignature); + } + + Ok(self + .cache + .entry(tl_proto::hash(&value.key)) + .or_insert_with_if( + || StoredValue::new(value, value.expires_at), + |prev| prev.expires_at < value.expires_at, + ) + .is_fresh()) + } + + fn insert_overlay_value(&self, _value: &proto::OverlayValue) -> Result { + todo!() + } +} + +#[derive(Clone)] +struct StoredValue { + expires_at: u32, + data: Bytes, +} + +impl StoredValue { + fn new>(value: &T, expires_at: u32) -> Self { + let mut data = BytesMut::with_capacity(value.max_size_hint()); + value.write_to(&mut data); + + StoredValue { + expires_at, + data: data.freeze(), + } + } +} + +struct ValueExpiry; + +impl Expiry for ValueExpiry { + fn expire_after_create( + &self, + _key: &StorageKeyId, + value: &StoredValue, + _created_at: std::time::Instant, + ) -> Option { + Some(ttl_since_now(value.expires_at)) + } + + fn expire_after_update( + &self, + _key: &StorageKeyId, + value: &StoredValue, + _updated_at: std::time::Instant, + _duration_until_expiry: Option, + ) -> Option { + Some(ttl_since_now(value.expires_at)) + } +} + +fn ttl_since_now(expires_at: u32) -> Duration { + let now = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .unwrap(); + + Duration::from_secs(expires_at as u64).saturating_sub(now) } pub type StorageKeyId = [u8; 32]; + +#[derive(Debug, thiserror::Error)] +pub enum StorageError { + #[error("value expired")] + ValueExpired, + #[error("unsupported ttl")] + UnsupportedTtl, + #[error("invalid key")] + InvalidKey, + #[error("invalid signature")] + InvalidSignature, + #[error("value too big")] + ValueTooBig, +} diff --git a/network/src/proto.tl b/network/src/proto.tl index da780075f..84b67f2ce 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -55,13 +55,11 @@ dht.nodes nodes:(vector dht.node) = dht.Nodes; * @param name key name as UTF-8 string * @param idx key index used for versioning * @param peer_id owner id -* @param signature a ed25519 signature of this structure by the owner */ dht.signedKey name:bytes idx:int peer_id:transport.PeerId - signature:bytes = dht.Key; /** diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index 38abe4682..83b30cf41 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -1,3 +1,4 @@ +use everscale_crypto::ed25519; use tl_proto::{TlRead, TlWrite}; #[derive(Clone, Copy, TlRead, TlWrite, Hash, PartialEq, Eq, PartialOrd, Ord)] @@ -14,6 +15,10 @@ impl PeerId { pub fn as_bytes(&self) -> &[u8; 32] { &self.0 } + + pub fn as_public_key(&self) -> Option { + ed25519::PublicKey::from_bytes(self.0) + } } impl<'a> TlRead<'a> for &'a PeerId { diff --git a/util/src/lib.rs b/util/src/lib.rs index 8b1378917..077885d7b 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1 +1 @@ - +pub mod time; diff --git a/util/src/time.rs b/util/src/time.rs new file mode 100644 index 000000000..ec8812309 --- /dev/null +++ b/util/src/time.rs @@ -0,0 +1,6 @@ +pub fn now_sec() -> u32 { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() as u32 +} From 747fa191124c351cc78b89e410cade7b39aaadbf Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 18 Dec 2023 15:14:56 +0100 Subject: [PATCH 05/35] Add 
`OverlayValueMerger` --- Cargo.lock | 95 +++++++++++++++++++------------------- network/src/dht/storage.rs | 68 +++++++++++++++++++++++++-- 2 files changed, 112 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2d0a9fcb1..f902f4d99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -204,9 +204,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "cpufeatures" @@ -234,9 +234,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5" dependencies = [ "cfg-if", "crossbeam-utils", @@ -244,22 +244,21 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset", - "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" dependencies = [ "cfg-if", ] @@ -298,7 +297,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -371,7 +370,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -443,7 +442,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -526,9 +525,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" @@ -547,9 +546,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.150" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "linux-raw-sys" @@ -733,9 +732,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "overload" @@ -768,9 +767,9 @@ dependencies = [ [[package]] name = "pem" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ "base64", "serde", @@ -807,7 +806,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -838,7 +837,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -1138,9 +1137,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.26" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags 2.4.1", "errno", @@ -1151,9 +1150,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.9" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", "ring 0.17.7", @@ -1173,9 +1172,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "same-file" @@ -1228,7 +1227,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -1352,9 +1351,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.39" +version = "2.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" dependencies = [ "proc-macro2", "quote", @@ -1394,22 +1393,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -1489,7 +1488,7 @@ dependencies = [ "proc-macro2", "quote", "rustc-hash", - "syn 2.0.39", + "syn 2.0.41", "tl-scheme", ] @@ -1508,9 +1507,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.34.0" +version = "1.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
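Beyond the dependency bumps, the substantive change in this patch is the `OverlayValueMerger` trait added to `storage.rs` below, which lets an overlay subsystem validate and merge overlay-managed values before they are cached. Purely as an illustration (not part of the patch), a merger that accepts every value and keeps whichever entry expires later could look like this, using the module paths as of this patch:

use crate::dht::proto;
use crate::dht::storage::{OverlayValueMerger, StorageError};

/// Illustrative merger: accepts every value and keeps whichever expires later.
/// A real implementation would also validate the overlay id and the payload.
struct KeepFreshest;

impl OverlayValueMerger for KeepFreshest {
    fn check_value(&self, _new: &proto::OverlayValue) -> Result<(), StorageError> {
        Ok(())
    }

    fn merge_value(&self, new: &proto::OverlayValue, stored: &mut proto::OverlayValue) -> bool {
        if new.expires_at > stored.expires_at {
            // Replace the previously stored entry with the fresher one.
            *stored = new.clone();
            true
        } else {
            // Keep the existing entry.
            false
        }
    }
}
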
-checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +checksum = "841d45b238a16291a4e1584e61820b8ae57d696cc5015c459c229ccc6990cc1c" dependencies = [ "backtrace", "bytes", @@ -1531,7 +1530,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -1595,7 +1594,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] @@ -1842,7 +1841,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", "wasm-bindgen-shared", ] @@ -1864,7 +1863,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2076,22 +2075,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.29" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d075cf85bbb114e933343e087b92f2146bac0d55b534cbb8188becf0039948e" +checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.29" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86cd5ca076997b97ef09d3ad65efe811fa68c9e874cb636ccb211223a813b0c2" +checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.41", ] [[package]] diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index 5065e200b..cad1cc4e1 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs @@ -1,3 +1,4 @@ +use std::sync::{Arc, Weak}; use std::time::Duration; use anyhow::Result; @@ -12,8 +13,25 @@ use crate::dht::proto; type DhtCache = Cache; type DhtCacheBuilder = CacheBuilder>; +pub trait OverlayValueMerger { + fn check_value(&self, new: &proto::OverlayValue) -> Result<(), StorageError>; + + fn merge_value(&self, new: &proto::OverlayValue, stored: &mut proto::OverlayValue) -> bool; +} + +impl OverlayValueMerger for () { + fn check_value(&self, _new: &proto::OverlayValue) -> Result<(), StorageError> { + Err(StorageError::InvalidKey) + } + + fn merge_value(&self, _new: &proto::OverlayValue, _stored: &mut proto::OverlayValue) -> bool { + false + } +} + pub struct Builder { cache_builder: DhtCacheBuilder, + overlay_value_merger: Weak, max_ttl: Duration, max_key_name_len: usize, max_key_index: u32, @@ -24,6 +42,7 @@ impl Default for Builder { fn default() -> Self { Self { cache_builder: Default::default(), + overlay_value_merger: Weak::<()>::new(), max_ttl: Duration::from_secs(3600), max_key_name_len: 128, max_key_index: 4, @@ -34,7 +53,9 @@ impl Default for Builder { impl Builder { pub fn build(self) -> Storage { fn weigher(_key: &StorageKeyId, value: &StoredValue) -> u32 { - 32 + 4 + value.data.len() as u32 + std::mem::size_of::() as u32 + + std::mem::size_of::() as u32 + + value.data.len() as u32 } Storage { @@ -44,12 +65,18 @@ impl Builder { .weigher(weigher) .expire_after(ValueExpiry) .build_with_hasher(ahash::RandomState::default()), + overlay_value_merger: self.overlay_value_merger, max_ttl_sec: self.max_ttl.as_secs().try_into().unwrap_or(u32::MAX), max_key_name_len: self.max_key_name_len, max_key_index: self.max_key_index, } } + pub fn 
with_overlay_value_merger(mut self, merger: &Arc) -> Self { + self.overlay_value_merger = Arc::downgrade(merger); + self + } + pub fn with_max_key_name_len(mut self, len: usize) -> Self { self.max_key_name_len = len; self @@ -78,6 +105,7 @@ impl Builder { pub struct Storage { cache: DhtCache, + overlay_value_merger: Weak, max_ttl_sec: u32, max_key_name_len: usize, max_key_index: u32, @@ -131,8 +159,42 @@ impl Storage { .is_fresh()) } - fn insert_overlay_value(&self, _value: &proto::OverlayValue) -> Result { - todo!() + fn insert_overlay_value(&self, value: &proto::OverlayValue) -> Result { + use std::borrow::Cow; + use std::cell::RefCell; + + let Some(merger) = self.overlay_value_merger.upgrade() else { + return Ok(false); + }; + + merger.check_value(value)?; + + let new_value = RefCell::new(Cow::Borrowed(value)); + + Ok(self + .cache + .entry(tl_proto::hash(&value.key)) + .or_insert_with_if( + || { + let value = new_value.borrow(); + StoredValue::new(value.as_ref(), value.expires_at) + }, + |prev| { + let Ok(mut prev) = tl_proto::deserialize::(&prev.data) + else { + // Invalid values are always replaced with new values + return true; + }; + + if merger.merge_value(value, &mut prev) { + *new_value.borrow_mut() = Cow::Owned(prev); + true + } else { + false + } + }, + ) + .is_fresh()) } } From a26233d821c0e6c7af91339c6c1c288f049f9404 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 18 Dec 2023 20:39:02 +0100 Subject: [PATCH 06/35] wip: Routing --- network/src/dht/mod.rs | 1 + network/src/dht/routing.rs | 161 +++++++++++++++++++++++++++++++++++ network/src/types/peer_id.rs | 45 ++++++++++ 3 files changed, 207 insertions(+) create mode 100644 network/src/dht/routing.rs diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 6dbda82dc..4b6f0156e 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -1,2 +1,3 @@ +pub mod routing; pub mod proto; pub mod storage; diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs new file mode 100644 index 000000000..12ef9830a --- /dev/null +++ b/network/src/dht/routing.rs @@ -0,0 +1,161 @@ +use std::collections::{BTreeMap, VecDeque}; +use std::time::{Duration, Instant}; + +use crate::types::PeerId; + +pub struct Builder { + local_id: PeerId, + max_k: usize, + node_timeout: Duration, +} + +impl Builder { + pub fn build(self) -> RoutingTable { + RoutingTable { + local_id: self.local_id, + buckets: BTreeMap::default(), + max_k: self.max_k, + node_timeout: self.node_timeout, + } + } + + pub fn with_node_timeout(mut self, timeout: Duration) -> Self { + self.node_timeout = timeout; + self + } +} + +pub struct RoutingTable { + local_id: PeerId, + buckets: BTreeMap, + max_k: usize, + node_timeout: Duration, +} + +impl RoutingTable { + pub fn builder(local_id: PeerId) -> Builder { + Builder { + local_id, + max_k: 20, + node_timeout: Duration::from_secs(15 * 60), + } + } + + pub fn add(&mut self, key: &PeerId) -> bool { + let distance = distance(&self.local_id, key); + if distance == 0 { + return false; + } + + self.buckets + .entry(distance) + .or_insert_with(|| Bucket::with_capacity(self.max_k)) + .insert(key, self.max_k, &self.node_timeout) + } + + pub fn remove(&mut self, key: &PeerId) -> bool { + let distance = distance(&self.local_id, key); + if let Some(bucket) = self.buckets.get_mut(&distance) { + bucket.remove(key) + } else { + false + } + } + + pub fn closest(&self, key: &PeerId, count: usize) -> Vec { + let index = self.get_bucket_index(key); + let mut result = Vec::::new(); + + { + let (first, second) = 
self.buckets[index].nodes.as_slices(); + result.extend_from_slice(first); + result.extend_from_slice(second); + } + + result + } + + fn get_bucket_index(&mut self, key: &PeerId) -> usize { + debug_assert!(!self.buckets.is_empty()); + std::cmp::min(self.local_id.common_prefix_len(key), self.buckets.len() - 1) + } +} + +struct Bucket { + nodes: VecDeque, +} + +impl Bucket { + fn with_capacity(capacity: usize) -> Self { + Self { + nodes: VecDeque::with_capacity(capacity), + } + } + + fn insert(&mut self, key: &PeerId, max_k: usize, timeout: &Duration) -> bool { + if let Some(index) = self.nodes.iter_mut().position(|node| &node.id == key) { + self.nodes.remove(index); + } else if self.nodes.len() >= max_k { + if matches!(self.nodes.front(), Some(node) if node.is_expired(timeout)) { + self.nodes.pop_front(); + } else { + return false; + } + } + + self.nodes.push_back(Node::new(key)); + true + } + + fn remove(&mut self, key: &PeerId) -> bool { + if let Some(index) = self.nodes.iter().position(|node| &node.id == key) { + self.nodes.remove(index); + true + } else { + false + } + } + + fn contains(&self, key: &PeerId) -> bool { + self.nodes.iter().any(|node| &node.id == key) + } + + fn is_empty(&self) -> bool { + self.nodes.is_empty() + } +} + +struct Node { + id: PeerId, + last_updated_at: Instant, +} + +impl Node { + fn new(peer_id: &PeerId) -> Self { + Self { + id: *peer_id, + last_updated_at: Instant::now(), + } + } + + fn is_expired(&self, timeout: &Duration) -> bool { + &self.last_updated_at.elapsed() >= timeout + } +} + +pub fn distance(left: &PeerId, right: &PeerId) -> usize { + const MAX_DISTANCE: usize = 256; + + for (i, (left, right)) in std::iter::zip(left.0.chunks(8), right.0.chunks(8)).enumerate() { + let left = u64::from_be_bytes(left.try_into().unwrap()); + let right = u64::from_be_bytes(right.try_into().unwrap()); + let diff = left ^ right; + if diff != 0 { + return MAX_DISTANCE - i * 64 + diff.leading_zeros() as usize; + } + } + + 0 +} + +const MAX_BUCKET_SIZE_K: usize = 20; diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index 83b30cf41..0057594da 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -49,6 +49,51 @@ impl std::fmt::Debug for PeerId { } } +impl std::ops::BitXor for PeerId { + type Output = PeerId; + + #[inline] + fn bitxor(mut self, rhs: PeerId) -> Self::Output { + self ^= rhs; + self + } +} + +impl std::ops::BitXor<&PeerId> for PeerId { + type Output = PeerId; + + #[inline] + fn bitxor(mut self, rhs: &PeerId) -> Self::Output { + self ^= rhs; + self + } +} + +impl std::ops::BitXor<&PeerId> for &PeerId { + type Output = PeerId; + + #[inline] + fn bitxor(self, rhs: &PeerId) -> Self::Output { + *self ^ rhs + } +} + +impl std::ops::BitXorAssign for PeerId { + #[inline] + fn bitxor_assign(&mut self, rhs: PeerId) { + std::ops::BitXorAssign::bitxor_assign(self, &rhs) + } +} + +impl std::ops::BitXorAssign<&PeerId> for PeerId { + #[inline] + fn bitxor_assign(&mut self, rhs: &PeerId) { + for (left, right) in self.0.iter_mut().zip(&rhs.0) { + *left ^= right; + } + } +} + #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum Direction { Inbound, From 0a9c05fef2adc3c1f1c6f4b9242f6efc168f5f77 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Wed, 20 Dec 2023 14:15:12 +0100 Subject: [PATCH 07/35] Fix routing table --- network/src/dht/routing.rs | 280 +++++++++++++++++++++++++++++++++-- network/src/types/peer_id.rs | 16 ++ 2 files changed, 283 insertions(+), 13 deletions(-) diff --git a/network/src/dht/routing.rs 
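For reference, the XOR-based `distance` above is `256 - common_prefix_len_in_bits` of the two ids; the expression in the previous hunk is missing parentheses around `i * 64 + diff.leading_zeros()`, which the patch below corrects. A small sanity check against the corrected formula, written as an illustrative test in the style of the routing-table tests that follow and using the `FromStr` impl this patch adds to `PeerId`:

#[test]
fn distance_matches_common_prefix_len() {
    use std::str::FromStr;

    let zero = PeerId::from_str(
        "0000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();

    // Differs only in the lowest bit of the last byte: 255 shared prefix bits,
    // so the distance is 256 - 255 = 1.
    let low_bit = PeerId::from_str(
        "0000000000000000000000000000000000000000000000000000000000000001",
    )
    .unwrap();
    assert_eq!(distance(&zero, &low_bit), 1);

    // Differs in the highest bit of the first byte: no shared prefix, distance 256.
    let high_bit = PeerId::from_str(
        "8000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    assert_eq!(distance(&zero, &high_bit), 256);

    // Identical ids are at distance zero.
    assert_eq!(distance(&zero, &zero), 0);
}
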
b/network/src/dht/routing.rs index 12ef9830a..57fb65300 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -63,21 +63,46 @@ impl RoutingTable { } pub fn closest(&self, key: &PeerId, count: usize) -> Vec { - let index = self.get_bucket_index(key); - let mut result = Vec::::new(); + let count = count.min(self.max_k); + if count == 0 { + return Vec::new(); + } - { - let (first, second) = self.buckets[index].nodes.as_slices(); - result.extend_from_slice(first); - result.extend_from_slice(second); + // TODO: fill secure and unsecure buckets in parallel + let mut result = Vec::with_capacity(count); + let distance = distance(&self.local_id, key); + + // Search for closest nodes first + for i in (distance..=MAX_DISTANCE).chain((0..distance).rev()) { + let remaining = match count.checked_sub(result.len()) { + None | Some(0) => break, + Some(n) => n, + }; + + if let Some(bucket) = self.buckets.get(&i) { + for node in bucket.nodes.iter().take(remaining) { + result.push(node.id); + } + } } result } - fn get_bucket_index(&mut self, key: &PeerId) -> usize { - debug_assert!(!self.buckets.is_empty()); - std::cmp::min(self.local_id.common_prefix_len(key), self.buckets.len() - 1) + pub fn is_empty(&self) -> bool { + self.buckets.values().all(Bucket::is_empty) + } + + pub fn len(&self) -> usize { + self.buckets.values().map(|bucket| bucket.nodes.len()).sum() + } + + pub fn contains(&self, key: &PeerId) -> bool { + let distance = distance(&self.local_id, key); + self.buckets + .get(&distance) + .map(|bucket| bucket.contains(key)) + .unwrap_or_default() } } @@ -144,18 +169,247 @@ impl Node { } pub fn distance(left: &PeerId, right: &PeerId) -> usize { - const MAX_DISTANCE: usize = 256; - for (i, (left, right)) in std::iter::zip(left.0.chunks(8), right.0.chunks(8)).enumerate() { let left = u64::from_be_bytes(left.try_into().unwrap()); let right = u64::from_be_bytes(right.try_into().unwrap()); let diff = left ^ right; if diff != 0 { - return MAX_DISTANCE - i * 64 + diff.leading_zeros() as usize; + return MAX_DISTANCE - (i * 64 + diff.leading_zeros() as usize); } } 0 } -const MAX_BUCKET_SIZE_K: usize = 20; +const MAX_DISTANCE: usize = 256; + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::*; + + #[test] + fn buckets_are_sets() { + let mut table = RoutingTable::builder(PeerId::random()).build(); + + let peer = PeerId::random(); + assert!(table.add(&peer)); + assert!(table.add(&peer)); // returns true because the node was updated + assert_eq!(table.len(), 1); + } + + #[test] + fn sould_not_add_seld() { + let local_id = PeerId::random(); + let mut table = RoutingTable::builder(local_id).build(); + + assert!(!table.add(&local_id)); + assert!(table.is_empty()); + } + + #[test] + fn max_k_per_bucket() { + let k = 20; + let timeout = Duration::MAX; + let mut bucket = Bucket::with_capacity(k); + + for _ in 0..k { + assert!(bucket.insert(&PeerId::random(), k, &timeout)); + } + assert!(!bucket.insert(&PeerId::random(), k, &timeout)); + } + + #[test] + fn find_closest_nodes() { + let ids = [ + "4a76f9bc07ca82a9a60a198de13721283649b0d1e3eada12e717e922a02e5bb3", + "0f542142194b68e262a715791380574fe1ba59a440372bb48a6021cadcbe0c80", + "95e594066d545fe55f3a7da54065f12bfade3205480f2f0c48ea4ab23af955c9", + "ceec84c6726f140200dfe4b206d46eee82ee94f4539ad5579070ba59d4748065", + "ef02b1fda8ca4082168a925f8e4f1382764fc8650f5945c64c57a54741fd45b1", + "d2778cf6161b43fbd552902be3ab56d2059e8e4ab2563b7b54e2f3dc37735686", + "bd1ab6dcb76bdef6da7f7fb3fcc1d187638e67acf19654157074c0052c267fe1", + 
"2709f88a1cda61b92f3036e69a7bcee273721a89e1bcbfa5bf705efcfd66ea5e", + "cb6eeb5680c581bfab2da1d9c2dbeae43ce27d8c59179f6a2e75c9c63a044db6", + "75a8edc3ac6fd40dcb3ec28ef27886225dfe267745c5ca237c036f6d60a06c7f", + "1e7de617e4fd4cd5a5c6c4258dbf24561099e8cb233577a1e2a875f77260a7ab", + "138f06d98756b78695d115e4cacdb56069f3564ac48f49241d9d61b177948b37", + "e0e608b434424cfbe6b7995f8b1dec9d8d08cf9e93aa59b8e36fd6f9f2239538", + "236286b8f8c388ea0877201fd1301e701b474c46f7b5df329fbd3289602074e9", + "6660dc422459c1e1003a8cdcbd5e3fd722df33e67e26072b582ee8c46c5ad5e9", + "19b32fcbf5b45bd3679ce5c9e22b11b57a5fcf56a746ff5426857644ccbc272a", + "fb8c40aaa92e4910a7a47d547c290c598b5aa357a0b96fc3815d7710f682b69c", + "6cf33e51fa4e0cef076c79bd90e1b50eb4be2cb70a1d0a275bd7aa2760a73e4e", + "1c72b8583ac3947718238a23863283a2fe8aedc2581d5538f9830441ad3bf84c", + "c52600bc1018e595739d45433ea4c59ce36fea5242c3c46270c3e61af3f24c26", + "1127d91d128f383f528e5e8b24bfc683368fd22d3e7e185ac50816f1d51726f4", + "1d16bbaf7d432418ad0f799d71cdfea0a865f50a3b02543dc846e4797bdcf30d", + "74ce456b7e956c407625c24522ef5851ae3e2d5630603ff60427fe682e3419ea", + "12dcaae7276b99335b3b75ae8fd04ce378c5e5c7b0143efd04c3d800e4345061", + "6f9bde29ef1eae90896e1fb50fe19df9f342af97018e67dae7c8f721280f4243", + "a8caf1325b29fc49c465b6b8bd6cfc2cbb2d4809153e307956c336b0d4bbd816", + "b4b5d8eb4c39345dd8bea701a541e8fb4df827964aa3796bad122652ddd5be1e", + "9f812affedd280a6c13d8916e3f933a7d52d5814bc3c6218a31bfe37cce3befa", + "beec74f32c5c9b83df70aa0df4e3f5abea71726b2edc123a8bb453ddf3d2de90", + "d2f0f2e684c6578e60794fee51d3bcb484627bb384027bd07a442216f1a38893", + "956b9e26da6a429e70828b777d6b843662f1172258129f20273bea63d4c8a467", + "88361b564dc7d147e411087ac891e8565aadd5b27a2e92a7a7bd7107d02b5fdc", + "52593e3c6739d16f22723840225a804e0b9a90a86b29cb470345cc5559b6ac49", + "7289912a703a94fc0df53a80d6e23a8195f4dd04d67e45d31a0426dcc3d5d1b1", + "ae7c0ca443cf94dd0ee54a0bb7678aa2a82712da2da87813363ff8232ca3a363", + "db2328dc4fee7c9a888cf3c4099a9e1eb0d5c43d52702898c27ff442307c1934", + "965d913e0de7251d12985467e9adc9fb4ba87988307cc9e2b940712a818caacd", + "ba714e28cf5f87e84a6ff8e3187db1ffe0d5788c74067cb8d90bcea184600afa", + "beb3c47ee72dc88438d694947a619200dfc000dccc5239f719096e42600524ab", + "882a587dc9f47a0c40074f4232ff67a61865365c626999aff9526d34955757a2", + "f2ad154d811d2e019d63f8e4f824fba7b72ff13a7e97da12cf717a76ea91273e", + "45e5e550116a9f49bd25a86120ae1f40f85b2611bd9a45dd8a98a9f9dd647dc6", + "d5813e9a7a9445b68839db5e7a95e7125090e4ac763cbe32812ae0c5002d1a58", + "4214d98c9bf2166cc41ef9cf0c37fac68b685358f638e36f2297f426c04f91b4", + "7cc1af0803f8fea2015577e1a5510310cad5b136d5924919b9e533e66c648e2d", + "f62ca6a41fa5ed4be443d5b326166a3dafc2c0b1d7dbfcbc709ed22bfddf28a2", + "c91581e33de7a3e1404c58f559e5d6b4438f27d8bedfbb357b8f064ed86df1f8", + "d1ac225a8bfaba82776b6da70010d66b29a876385bacc4a4b365d6ffbeeacc86", + "8c8b75aeeff02c3b88394fe18e7a65534da1b00b36f9446061f7d995484e6177", + "f7d172ff80f4e451f04ba73e279286f2a4707e290ec4268bc16fe94277c7f733", + "3205396db7242347cfa75c796839cf5afb7961a9acb01f650c163fb86b332097", + "7ba5e8fee0239cc2c499161aaecf89d4fa3ebc76b7c8c2d1b305d3309121ca4d", + "c3c05d9e1d51c2d87d6eb4144a726eec697999ba21552951e9c4eefc07f35df4", + "771594e90ff55c810a697d901027ca73e286a8977ea19432e95e28761be19319", + "efcf3927f3456a8eb5e87a2f1d5c582c2bb97336455edad53ce10dcfdbe79420", + "e96dc8c885a3fc866597c8f3b243b011eb928b81bd4accd4fe08d9277a468b75", + "b2797ca70e15d10f8079c527ad13da29af6e261b75f6ccbb5908b6e4e7c7dc87", + 
"757fe465b20ac4614df7cca23fb3038848fd7fbd0d59afb8800f5e9f212acf40", + "d2bf6ff26de798c1e8944c6c8a39c22b2299e3192fd3a83347a72d7ec4f80071", + "30dbca20ebf6c7f4cdcd8ccf0ce78ae858fd3b296b033ff305559896cb22f54f", + "0a99ceb98807d4f3d217e4b71a7b0cbeb3f79f954088c4f143e1cf046e2132f6", + "227c54051f6872cae600a006eb0e6840ba3903e61a52a18f4a31e4248c7a68eb", + "79799ee7e4e0c5d90d900d9e6a1a4b07ec9f0a565a64e109bca4768a26d095b3", + "2f548b927815ada03b49befad9fc5019d1607f8e3219dd5fa1510b8ae493f064", + "f146a459753a2fb80f3ff5278f9d1bd33734442fa5048e6e0c97d2ae060f1798", + "272dc41968edb8784e34ad71a9b7b06a5a5a200b8df1d14c6b68e6451e27c922", + "5db66920b3d006733c1eb10666b28d83929eede48a7b1fc8f690da2660464c62", + "99019fa36fe000eeafca8efd5fa5c0e77a3a4ed77a4d7ae526cbc71e57026d06", + "c2a0c8b2132ef0db36420eef9f5f0f87da43b01cd78a734bb82e55515f8ffd1d", + "f0c4dac4e62b132b3c3f6086d691c2bf710f1b47e1914eed3fc0a3d4176338a3", + "4f57644cf2f94cb9f547ad1043f8cc439bd7d47cb31748d68ca79b9bc411f99b", + "4ed89565bcd28fa1637fd30b474184c289dc8d326dc4fa052be131b8900b338d", + "b1eb827b1e0b7ca81df1590a5f29818e53a8156634653ae0c02cf3c2a4bb2bde", + "4fa40df71e0237d39d8cc769c2e7252bf741abc755995bbadd6a7e8f95ab1694", + "92398a19157e20036d1e9baffb360096524ae045316e988bf5365e0514183e9c", + "7ca701bffa4a52902298fbe7a7cd383360049cf5fc8201efe17470fd8bbdc7ee", + "e823a52f49062a18c7f2622ced876ca17985d84e20d278935c230847e5560ed1", + "712a228b32fb45b91c9691e73daa96fa0136c85796d0cd802905de7b36da5c99", + "9475a23f0eb50d1573bc6032db822dfda0885bea1eb096cd65eee3bb292c7567", + "6da8d09bc9115d799efdc7e77b7e488dfd87e566e440fb9f591a1257b7914c9d", + "f1ca9e1623356604a00f1982837fe10d634b3f758c5b72d5f543548e616d95e5", + "4e97df7376a778ef083de064d09d9ddd60c42d382bd7d53a721fecdb1e6fba2a", + "dd429467062dcb9e51832f6c6ab55a361615f56e8be7aed600292241684a8133", + "0fc4aed5ebbd23755b4e250bcbc44a5accd3a64b3cf9078da1c02cb53dc8c196", + "8d70a1319a085c4d1c22eac63335085d2c0ddf1a4ffb5b7d93c8a796679e2463", + "f873f50e465c834e2819d104d9dc904f8a32b3f09eb6a880b8669cf08247913e", + "69870545d1b886222d4b968aa14c70c0bfa436893e5a6894749e964cd760069c", + "d5b590ec2b93d9b78e225b254121630ccfaec13be57a1dbf7c915cd922e08d75", + "2abfc539a31361ee6e830b82149c33c898d4bcd3dea6127b930c05ce354dd474", + "1a34e99b9561406f55c9eb5c28965ae1458a6573abb6143f2ca793ccd3bcb7c6", + "5bfe3ac277824dd2d093eeb7241fa8011bbd4dc90ebbad7cce3d055b15524c0b", + "304884f6ea7d01bfa294edc27562c2ebe660e810087f7b962c168b1b967a8d74", + "272b32b839b80f4e7c980577ebc41d8d729d8bed66db9522d69f3851a186fbeb", + "77f06ed2f83251c47af7afef00e9d96729a7d30388fdbe067987a333ea085ede", + "a942f1858af47d7347696427504b9eafa94af58452fa4a26fcc1a99ed70e78b6", + "500de9b4be309b5fa9074e856a473090419c2a131e3823636abe0d538e18c712", + "c30e59f93b5c3a801a31016d2e17e5c7fb5bd525165827466925e8a8cc4dbcd9", + "ffce42b385ed2abdc6eef6b88fd963522b57bfea2f9c7f6b230eb1c518912edf", + "750b037a6a8b833ee976ce27120e31807b644626e526a5e4fff3bfcfeed374dd", + "93a756cd44f530a9a072b6573421ba1ade3a7fe35494a2fc308da2ed58c1a7f7", + ]; + let ids = ids + .into_iter() + .map(PeerId::from_str) + .collect::, _>>() + .unwrap(); + + let local_id = + PeerId::from_str("bdbc554024c65b463b0f0a01037b55985190f4fc01c47dc81c19aab4b4b2d9ab") + .unwrap(); + + let mut table = RoutingTable::builder(local_id).build(); + for id in ids { + table.add(&id); + } + + { + let expected_closest_ids = [ + "882a587dc9f47a0c40074f4232ff67a61865365c626999aff9526d34955757a2", + "88361b564dc7d147e411087ac891e8565aadd5b27a2e92a7a7bd7107d02b5fdc", + 
"8c8b75aeeff02c3b88394fe18e7a65534da1b00b36f9446061f7d995484e6177", + "92398a19157e20036d1e9baffb360096524ae045316e988bf5365e0514183e9c", + "9475a23f0eb50d1573bc6032db822dfda0885bea1eb096cd65eee3bb292c7567", + "956b9e26da6a429e70828b777d6b843662f1172258129f20273bea63d4c8a467", + "95e594066d545fe55f3a7da54065f12bfade3205480f2f0c48ea4ab23af955c9", + "965d913e0de7251d12985467e9adc9fb4ba87988307cc9e2b940712a818caacd", + "99019fa36fe000eeafca8efd5fa5c0e77a3a4ed77a4d7ae526cbc71e57026d06", + "9f812affedd280a6c13d8916e3f933a7d52d5814bc3c6218a31bfe37cce3befa", + "a8caf1325b29fc49c465b6b8bd6cfc2cbb2d4809153e307956c336b0d4bbd816", + "a942f1858af47d7347696427504b9eafa94af58452fa4a26fcc1a99ed70e78b6", + "ae7c0ca443cf94dd0ee54a0bb7678aa2a82712da2da87813363ff8232ca3a363", + "b1eb827b1e0b7ca81df1590a5f29818e53a8156634653ae0c02cf3c2a4bb2bde", + "b2797ca70e15d10f8079c527ad13da29af6e261b75f6ccbb5908b6e4e7c7dc87", + "b4b5d8eb4c39345dd8bea701a541e8fb4df827964aa3796bad122652ddd5be1e", + "ba714e28cf5f87e84a6ff8e3187db1ffe0d5788c74067cb8d90bcea184600afa", + "bd1ab6dcb76bdef6da7f7fb3fcc1d187638e67acf19654157074c0052c267fe1", + "beb3c47ee72dc88438d694947a619200dfc000dccc5239f719096e42600524ab", + "beec74f32c5c9b83df70aa0df4e3f5abea71726b2edc123a8bb453ddf3d2de90", + ]; + let expected_closest_ids = expected_closest_ids + .into_iter() + .map(PeerId::from_str) + .collect::, _>>() + .unwrap(); + + let mut closest = table.closest(&local_id, 20); + closest.sort(); + assert_eq!(closest, expected_closest_ids); + } + + { + let expected_closest_ids = [ + "c3c05d9e1d51c2d87d6eb4144a726eec697999ba21552951e9c4eefc07f35df4", + "c52600bc1018e595739d45433ea4c59ce36fea5242c3c46270c3e61af3f24c26", + "c91581e33de7a3e1404c58f559e5d6b4438f27d8bedfbb357b8f064ed86df1f8", + "cb6eeb5680c581bfab2da1d9c2dbeae43ce27d8c59179f6a2e75c9c63a044db6", + "ceec84c6726f140200dfe4b206d46eee82ee94f4539ad5579070ba59d4748065", + "d1ac225a8bfaba82776b6da70010d66b29a876385bacc4a4b365d6ffbeeacc86", + "d2778cf6161b43fbd552902be3ab56d2059e8e4ab2563b7b54e2f3dc37735686", + "d2bf6ff26de798c1e8944c6c8a39c22b2299e3192fd3a83347a72d7ec4f80071", + "d2f0f2e684c6578e60794fee51d3bcb484627bb384027bd07a442216f1a38893", + "d5813e9a7a9445b68839db5e7a95e7125090e4ac763cbe32812ae0c5002d1a58", + "db2328dc4fee7c9a888cf3c4099a9e1eb0d5c43d52702898c27ff442307c1934", + "e0e608b434424cfbe6b7995f8b1dec9d8d08cf9e93aa59b8e36fd6f9f2239538", + "e96dc8c885a3fc866597c8f3b243b011eb928b81bd4accd4fe08d9277a468b75", + "ef02b1fda8ca4082168a925f8e4f1382764fc8650f5945c64c57a54741fd45b1", + "efcf3927f3456a8eb5e87a2f1d5c582c2bb97336455edad53ce10dcfdbe79420", + "f146a459753a2fb80f3ff5278f9d1bd33734442fa5048e6e0c97d2ae060f1798", + "f2ad154d811d2e019d63f8e4f824fba7b72ff13a7e97da12cf717a76ea91273e", + "f62ca6a41fa5ed4be443d5b326166a3dafc2c0b1d7dbfcbc709ed22bfddf28a2", + "f7d172ff80f4e451f04ba73e279286f2a4707e290ec4268bc16fe94277c7f733", + "fb8c40aaa92e4910a7a47d547c290c598b5aa357a0b96fc3815d7710f682b69c", + ]; + let expected_closest_ids = expected_closest_ids + .into_iter() + .map(PeerId::from_str) + .collect::, _>>() + .unwrap(); + + let target = PeerId::from_str( + "d41f603e6bd24f1c3e2eb4d97d81fd155dd307f5b5c9be443a1a229bd1392b72", + ) + .unwrap(); + + let mut closest = table.closest(&target, 20); + closest.sort(); + assert_eq!(closest, expected_closest_ids); + } + } +} diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index 0057594da..a8642cf49 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -1,4 +1,7 @@ +use std::str::FromStr; + use 
everscale_crypto::ed25519; +use rand::Rng; use tl_proto::{TlRead, TlWrite}; #[derive(Clone, Copy, TlRead, TlWrite, Hash, PartialEq, Eq, PartialOrd, Ord)] @@ -19,6 +22,10 @@ impl PeerId { pub fn as_public_key(&self) -> Option { ed25519::PublicKey::from_bytes(self.0) } + + pub fn random() -> Self { + Self(rand::thread_rng().gen()) + } } impl<'a> TlRead<'a> for &'a PeerId { @@ -49,6 +56,15 @@ impl std::fmt::Debug for PeerId { } } +impl FromStr for PeerId { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + let mut peer_id = PeerId([0; 32]); + hex::decode_to_slice(s, &mut peer_id.0).map(|_| peer_id) + } +} + impl std::ops::BitXor for PeerId { type Output = PeerId; From aeb9e6766da02e00e91fa9feb67f2fff1744cbb4 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Wed, 20 Dec 2023 16:00:40 +0100 Subject: [PATCH 08/35] Fix public visibility --- network/src/config.rs | 4 +- network/src/dht/mod.rs | 24 ++++++- network/src/dht/routing.rs | 8 +-- network/src/dht/storage.rs | 41 +++++++----- network/src/lib.rs | 24 ++++--- network/src/network/connection_manager.rs | 8 +-- network/src/network/mod.rs | 73 ++++++++++++++++------ network/src/network/peer.rs | 2 +- network/src/network/request_handler.rs | 12 ++-- network/src/{proto.rs => network/wire.rs} | 0 network/src/{dht/proto.rs => proto/dht.rs} | 1 + network/src/types/peer_id.rs | 2 +- 12 files changed, 136 insertions(+), 63 deletions(-) rename network/src/{proto.rs => network/wire.rs} (100%) rename network/src/{dht/proto.rs => proto/dht.rs} (99%) diff --git a/network/src/config.rs b/network/src/config.rs index 10a50f7ad..92c608788 100644 --- a/network/src/config.rs +++ b/network/src/config.rs @@ -90,7 +90,7 @@ impl QuicConfig { } } -pub struct EndpointConfig { +pub(crate) struct EndpointConfig { pub peer_id: PeerId, pub service_name: String, pub client_cert: rustls::Certificate, @@ -124,7 +124,7 @@ impl EndpointConfig { } } -pub struct EndpointConfigBuilder { +pub(crate) struct EndpointConfigBuilder { mandatory_fields: MandatoryFields, optional_fields: EndpointConfigBuilderFields, } diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 4b6f0156e..a665d0d0b 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -1,3 +1,21 @@ -pub mod routing; -pub mod proto; -pub mod storage; +use std::sync::Arc; +use std::sync::Mutex; +use std::time::Instant; + +use self::routing::RoutingTable; +use self::storage::Storage; +use crate::types::PeerId; + +mod routing; +mod storage; + +pub struct Dht(Arc); + +impl Dht {} + +struct DhtInner { + _local_id: PeerId, + _routing_table: Mutex, + _last_table_refersh: Instant, + _storage: Storage, +} diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index 57fb65300..cce7544e3 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -3,13 +3,13 @@ use std::time::{Duration, Instant}; use crate::types::PeerId; -pub struct Builder { +pub struct RoutingTableBuilder { local_id: PeerId, max_k: usize, node_timeout: Duration, } -impl Builder { +impl RoutingTableBuilder { pub fn build(self) -> RoutingTable { RoutingTable { local_id: self.local_id, @@ -33,8 +33,8 @@ pub struct RoutingTable { } impl RoutingTable { - pub fn builder(local_id: PeerId) -> Builder { - Builder { + pub fn builder(local_id: PeerId) -> RoutingTableBuilder { + RoutingTableBuilder { local_id, max_k: 20, node_timeout: Duration::from_secs(15 * 60), diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index cad1cc4e1..969e96396 100644 --- a/network/src/dht/storage.rs +++ 
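// Hedged usage sketch of the hex `FromStr` impl for `PeerId` added above: a
// 64-character hex string parses into the 32-byte id, while non-hex or wrongly
// sized input surfaces the underlying `hex::FromHexError`. The
// `tycho_network::PeerId` path assumes the crate's public re-export.
fn parse_peer_id_demo() {
    use std::str::FromStr;
    use tycho_network::PeerId;

    let id = PeerId::from_str(
        "bdbc554024c65b463b0f0a01037b55985190f4fc01c47dc81c19aab4b4b2d9ab",
    )
    .expect("64 hex chars decode into 32 bytes");
    assert!(PeerId::from_str("not a peer id").is_err());
    let _ = id;
}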
b/network/src/dht/storage.rs @@ -8,28 +8,36 @@ use moka::Expiry; use tl_proto::TlWrite; use tycho_util::time::*; -use crate::dht::proto; +use crate::proto; type DhtCache = Cache; type DhtCacheBuilder = CacheBuilder>; pub trait OverlayValueMerger { - fn check_value(&self, new: &proto::OverlayValue) -> Result<(), StorageError>; + fn check_value(&self, new: &proto::dht::OverlayValue) -> Result<(), StorageError>; - fn merge_value(&self, new: &proto::OverlayValue, stored: &mut proto::OverlayValue) -> bool; + fn merge_value( + &self, + new: &proto::dht::OverlayValue, + stored: &mut proto::dht::OverlayValue, + ) -> bool; } impl OverlayValueMerger for () { - fn check_value(&self, _new: &proto::OverlayValue) -> Result<(), StorageError> { + fn check_value(&self, _new: &proto::dht::OverlayValue) -> Result<(), StorageError> { Err(StorageError::InvalidKey) } - fn merge_value(&self, _new: &proto::OverlayValue, _stored: &mut proto::OverlayValue) -> bool { + fn merge_value( + &self, + _new: &proto::dht::OverlayValue, + _stored: &mut proto::dht::OverlayValue, + ) -> bool { false } } -pub struct Builder { +pub struct StorageBuilder { cache_builder: DhtCacheBuilder, overlay_value_merger: Weak, max_ttl: Duration, @@ -38,7 +46,7 @@ pub struct Builder { // TODO: add a hashset for allowed keys (maybe separate signed keys from overlay keys) } -impl Default for Builder { +impl Default for StorageBuilder { fn default() -> Self { Self { cache_builder: Default::default(), @@ -50,7 +58,7 @@ impl Default for Builder { } } -impl Builder { +impl StorageBuilder { pub fn build(self) -> Storage { fn weigher(_key: &StorageKeyId, value: &StoredValue) -> u32 { std::mem::size_of::() as u32 @@ -112,11 +120,11 @@ pub struct Storage { } impl Storage { - pub fn builder() -> Builder { - Builder::default() + pub fn builder() -> StorageBuilder { + StorageBuilder::default() } - pub fn insert(&self, value: &proto::Value) -> Result { + pub fn insert(&self, value: &proto::dht::Value) -> Result { match value.expires_at().checked_sub(now_sec()) { Some(0) | None => return Err(StorageError::ValueExpired), Some(remaining_ttl) if remaining_ttl > self.max_ttl_sec => { @@ -132,12 +140,12 @@ impl Storage { } match value { - proto::Value::Signed(value) => self.insert_signed_value(value), - proto::Value::Overlay(value) => self.insert_overlay_value(value), + proto::dht::Value::Signed(value) => self.insert_signed_value(value), + proto::dht::Value::Overlay(value) => self.insert_overlay_value(value), } } - fn insert_signed_value(&self, value: &proto::SignedValue) -> Result { + fn insert_signed_value(&self, value: &proto::dht::SignedValue) -> Result { let Some(public_key) = value.key.peer_id.as_public_key() else { return Err(StorageError::InvalidSignature); }; @@ -159,7 +167,7 @@ impl Storage { .is_fresh()) } - fn insert_overlay_value(&self, value: &proto::OverlayValue) -> Result { + fn insert_overlay_value(&self, value: &proto::dht::OverlayValue) -> Result { use std::borrow::Cow; use std::cell::RefCell; @@ -180,7 +188,8 @@ impl Storage { StoredValue::new(value.as_ref(), value.expires_at) }, |prev| { - let Ok(mut prev) = tl_proto::deserialize::(&prev.data) + let Ok(mut prev) = + tl_proto::deserialize::(&prev.data) else { // Invalid values are always replaced with new values return true; diff --git a/network/src/lib.rs b/network/src/lib.rs index f7ed02019..62709bbd9 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,8 +1,16 @@ -pub mod config; -pub mod connection; -pub mod crypto; -pub mod dht; -pub mod endpoint; -pub mod network; -pub mod 
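// Stand-alone sketch of the expiry check `Storage::insert` performs above: a value
// is rejected when it has already expired or when its remaining TTL exceeds the
// configured maximum. `now_sec` and the error strings are local stand-ins for the
// crate's `tycho_util::time::now_sec` helper and its `StorageError` variants.
fn check_ttl(expires_at: u32, now_sec: u32, max_ttl_sec: u32) -> Result<(), &'static str> {
    match expires_at.checked_sub(now_sec) {
        Some(0) | None => Err("value expired"),
        Some(remaining) if remaining > max_ttl_sec => Err("unsupported ttl"),
        Some(_) => Ok(()),
    }
}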
proto; -pub mod types; +pub use config::{Config, QuicConfig}; +pub use dht::Dht; +pub use network::{Network, NetworkBuilder, Peer, WeakNetwork}; +pub use types::{Address, AddressList, Direction, PeerId, Request, Response, RpcQuery, Version}; + +mod config; +mod connection; +mod crypto; +mod dht; +mod endpoint; +mod network; +mod types; + +pub mod proto { + pub mod dht; +} diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index 08115065d..73ccc405f 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -10,6 +10,8 @@ use tokio::sync::{broadcast, mpsc, oneshot}; use tokio::task::JoinSet; use tower::util::BoxCloneService; +use super::request_handler::InboundRequestHandler; +use super::wire::handshake; use crate::config::Config; use crate::connection::Connection; use crate::endpoint::{Connecting, Endpoint}; @@ -18,8 +20,6 @@ use crate::types::{ PeerAffinity, PeerEvent, PeerId, PeerInfo, Response, }; -use super::request_handler::InboundRequestHandler; - #[derive(Debug)] pub enum ConnectionManagerRequest { Connect(Address, Option, oneshot::Sender>), @@ -248,7 +248,7 @@ impl ConnectionManager { } } - crate::proto::handshake(connection).await + handshake(connection).await }; let connecting_result = tokio::time::timeout(config.connect_timeout, fut) @@ -325,7 +325,7 @@ impl ConnectionManager { ) -> ConnectingOutput { let fut = async { let connection = connecting?.await?; - crate::proto::handshake(connection).await + handshake(connection).await }; let connecting_result = tokio::time::timeout(config.connect_timeout, fut) diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index e4c748f4e..ebf62235c 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -1,6 +1,6 @@ use std::convert::Infallible; use std::net::{SocketAddr, ToSocketAddrs}; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use anyhow::Result; use bytes::Bytes; @@ -12,16 +12,18 @@ use crate::config::{Config, EndpointConfig}; use crate::endpoint::Endpoint; use crate::types::{Address, DisconnectReason, InboundServiceRequest, PeerId, Response}; +pub use self::peer::Peer; + use self::connection_manager::{ ActivePeers, ConnectionManager, ConnectionManagerRequest, KnownPeers, WeakActivePeers, }; -use self::peer::Peer; -pub mod connection_manager; -pub mod peer; -pub mod request_handler; +mod connection_manager; +mod peer; +mod request_handler; +mod wire; -pub struct Builder { +pub struct NetworkBuilder { mandatory_fields: MandatoryFields, optional_fields: BuilderFields, } @@ -31,38 +33,38 @@ struct BuilderFields { config: Option, } -impl Builder { +impl NetworkBuilder { pub fn with_config(mut self, config: Config) -> Self { self.optional_fields.config = Some(config); self } } -impl Builder<((), T2)> { - pub fn with_service_name>(self, name: T) -> Builder<(String, T2)> { +impl NetworkBuilder<((), T2)> { + pub fn with_service_name>(self, name: T) -> NetworkBuilder<(String, T2)> { let (_, private_key) = self.mandatory_fields; - Builder { + NetworkBuilder { mandatory_fields: (name.into(), private_key), optional_fields: self.optional_fields, } } } -impl Builder<(T1, ())> { - pub fn with_private_key(self, private_key: [u8; 32]) -> Builder<(T1, [u8; 32])> { +impl NetworkBuilder<(T1, ())> { + pub fn with_private_key(self, private_key: [u8; 32]) -> NetworkBuilder<(T1, [u8; 32])> { let (service_name, _) = self.mandatory_fields; - Builder { + NetworkBuilder { mandatory_fields: (service_name, private_key), 
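// Stand-alone sketch (illustrative names, not the crate's API) of the typestate
// builder pattern `NetworkBuilder` uses above: each mandatory field flips one slot
// of the tuple type parameter from `()` to its concrete type, so `build` is only
// available once both the service name and the private key have been supplied and
// a missing field becomes a compile-time error.
struct DemoBuilder<T>(T);

impl DemoBuilder<((), ())> {
    fn new() -> Self {
        DemoBuilder(((), ()))
    }
}

impl<T2> DemoBuilder<((), T2)> {
    fn with_name(self, name: String) -> DemoBuilder<(String, T2)> {
        let ((), key) = self.0;
        DemoBuilder((name, key))
    }
}

impl<T1> DemoBuilder<(T1, ())> {
    fn with_key(self, key: [u8; 32]) -> DemoBuilder<(T1, [u8; 32])> {
        let (name, ()) = self.0;
        DemoBuilder((name, key))
    }
}

impl DemoBuilder<(String, [u8; 32])> {
    fn build(self) -> (String, [u8; 32]) {
        self.0
    }
}
// Usage: DemoBuilder::new().with_key([0; 32]).with_name("demo".into()).build();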
optional_fields: self.optional_fields, } } - pub fn with_random_private_key(self) -> Builder<(T1, [u8; 32])> { + pub fn with_random_private_key(self) -> NetworkBuilder<(T1, [u8; 32])> { self.with_private_key(rand::thread_rng().gen()) } } -impl Builder { +impl NetworkBuilder { pub fn build(self, bind_address: T, service: S) -> Result where S: Clone + Send + 'static, @@ -148,11 +150,25 @@ impl Builder { } } +#[derive(Clone)] +pub struct WeakNetwork(Weak); + +impl WeakNetwork { + pub fn upgrade(&self) -> Option { + self.0 + .upgrade() + .map(Network) + .and_then(|network| (!network.is_closed()).then_some(network)) + } +} + +#[derive(Clone)] +#[repr(transparent)] pub struct Network(Arc); impl Network { - pub fn builder() -> Builder<((), ())> { - Builder { + pub fn builder() -> NetworkBuilder<((), ())> { + NetworkBuilder { mandatory_fields: ((), ()), optional_fields: Default::default(), } @@ -191,9 +207,17 @@ impl Network { pub fn disconnect(&self, peer_id: &PeerId) -> Result<()> { self.0.disconnect(peer_id) } + + pub async fn shutdown(&self) -> Result<()> { + self.0.shutdown().await + } + + pub fn is_closed(&self) -> bool { + self.0.is_closed() + } } -pub struct NetworkInner { +struct NetworkInner { config: Arc, endpoint: Arc, active_peers: WeakActivePeers, @@ -240,6 +264,19 @@ impl NetworkInner { let connection = active_peers.get(peer_id)?; Some(Peer::new(connection, self.config.clone())) } + + async fn shutdown(&self) -> Result<()> { + let (sender, receiver) = oneshot::channel(); + self.connection_manager_handle + .send(ConnectionManagerRequest::Shutdown(sender)) + .await + .map_err(|_e| anyhow::anyhow!("network has been shutdown"))?; + receiver.await.map_err(Into::into) + } + + fn is_closed(&self) -> bool { + self.connection_manager_handle.is_closed() + } } #[cfg(test)] diff --git a/network/src/network/peer.rs b/network/src/network/peer.rs index aa69c4efe..0bf269250 100644 --- a/network/src/network/peer.rs +++ b/network/src/network/peer.rs @@ -4,9 +4,9 @@ use anyhow::Result; use bytes::Bytes; use tokio_util::codec::{FramedRead, FramedWrite}; +use super::wire::{make_codec, recv_response, send_request}; use crate::config::Config; use crate::connection::Connection; -use crate::proto::{make_codec, recv_response, send_request}; use crate::types::{PeerId, Request, Response}; #[derive(Clone)] diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs index 1edd4b574..d294ae0e7 100644 --- a/network/src/network/request_handler.rs +++ b/network/src/network/request_handler.rs @@ -8,12 +8,12 @@ use tokio::task::JoinSet; use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec}; use tower::util::{BoxCloneService, ServiceExt}; +use super::connection_manager::ActivePeers; +use super::wire::{make_codec, recv_request, send_response}; use crate::config::Config; use crate::connection::{Connection, SendStream}; use crate::types::{DisconnectReason, InboundRequestMeta, InboundServiceRequest, Response}; -use super::connection_manager::ActivePeers; - pub struct InboundRequestHandler { config: Arc, connection: Connection, @@ -116,8 +116,8 @@ impl BiStreamRequestHandler { Self { meta, service, - send_stream: FramedWrite::new(send_stream, crate::proto::make_codec(config)), - recv_stream: FramedRead::new(recv_stream, crate::proto::make_codec(config)), + send_stream: FramedWrite::new(send_stream, make_codec(config)), + recv_stream: FramedRead::new(recv_stream, make_codec(config)), } } @@ -128,7 +128,7 @@ impl BiStreamRequestHandler { } async fn do_handle(mut self) -> 
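// Minimal illustration (illustrative types only) of the `Network`/`WeakNetwork`
// split introduced above: long-lived background tasks hold only a `Weak` reference,
// so dropping the last strong handle tears the inner state down and later
// `upgrade` calls return `None`.
use std::sync::{Arc, Weak};

#[derive(Clone)]
struct StrongHandle(Arc<String>);

struct WeakHandle(Weak<String>);

impl StrongHandle {
    fn downgrade(&self) -> WeakHandle {
        WeakHandle(Arc::downgrade(&self.0))
    }
}

impl WeakHandle {
    fn upgrade(&self) -> Option<StrongHandle> {
        self.0.upgrade().map(StrongHandle)
    }
}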
Result<()> { - let req = crate::proto::recv_request(&mut self.recv_stream).await?; + let req = recv_request(&mut self.recv_stream).await?; let res = { let handler = self.service.oneshot(InboundServiceRequest { metadata: self.meta, @@ -142,7 +142,7 @@ impl BiStreamRequestHandler { } }; - crate::proto::send_response(&mut self.send_stream, res).await?; + send_response(&mut self.send_stream, res).await?; self.send_stream.get_mut().finish().await?; Ok(()) diff --git a/network/src/proto.rs b/network/src/network/wire.rs similarity index 100% rename from network/src/proto.rs rename to network/src/network/wire.rs diff --git a/network/src/dht/proto.rs b/network/src/proto/dht.rs similarity index 99% rename from network/src/dht/proto.rs rename to network/src/proto/dht.rs index c200cef26..9a9989139 100644 --- a/network/src/dht/proto.rs +++ b/network/src/proto/dht.rs @@ -168,6 +168,7 @@ pub struct NodeInfoResponse { pub info: NodeInfo, } +/// DHT RPC models. pub mod rpc { use crate::types::RpcQuery; diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index a8642cf49..c96e46251 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -97,7 +97,7 @@ impl std::ops::BitXor<&PeerId> for &PeerId { impl std::ops::BitXorAssign for PeerId { #[inline] fn bitxor_assign(&mut self, rhs: PeerId) { - std::ops::BitXorAssign::bitxor_assign(self, &rhs) + std::ops::BitXorAssign::bitxor_assign(self, &rhs); } } From 3cdf869d9a54ebdb43b70a6c2fedc667a6d51b3a Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Thu, 21 Dec 2023 17:54:10 +0100 Subject: [PATCH 09/35] Add query ext --- network/src/dht/mod.rs | 29 +++++++-- network/src/dht/storage.rs | 2 +- network/src/lib.rs | 2 + network/src/network/connection_manager.rs | 12 ++++ network/src/network/mod.rs | 19 ++++-- network/src/proto/dht.rs | 14 ++++ network/src/util/mod.rs | 78 +++++++++++++++++++++++ 7 files changed, 143 insertions(+), 13 deletions(-) create mode 100644 network/src/util/mod.rs diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index a665d0d0b..afb9ded51 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -1,9 +1,12 @@ -use std::sync::Arc; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use std::time::Instant; +use anyhow::Result; + use self::routing::RoutingTable; use self::storage::Storage; +use crate::network::WeakNetwork; +use crate::proto; use crate::types::PeerId; mod routing; @@ -11,11 +14,23 @@ mod storage; pub struct Dht(Arc); -impl Dht {} +impl Dht { + pub async fn find_peers(&self, key: &PeerId) -> Result> { + todo!() + } + + pub async fn find_value(&self, key: T) -> Result> + where + T: proto::dht::WithValue, + { + todo!() + } +} struct DhtInner { - _local_id: PeerId, - _routing_table: Mutex, - _last_table_refersh: Instant, - _storage: Storage, + local_id: PeerId, + routing_table: Mutex, + last_table_refersh: Instant, + storage: Storage, + network: WeakNetwork, } diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index 969e96396..c150f9df9 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs @@ -6,7 +6,7 @@ use bytes::{Bytes, BytesMut}; use moka::sync::{Cache, CacheBuilder}; use moka::Expiry; use tl_proto::TlWrite; -use tycho_util::time::*; +use tycho_util::time::now_sec; use crate::proto; diff --git a/network/src/lib.rs b/network/src/lib.rs index 62709bbd9..140983c31 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -11,6 +11,8 @@ mod endpoint; mod network; mod types; +pub mod util; + pub mod proto { pub mod dht; } diff --git 
a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index 73ccc405f..dad4f41bf 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -422,6 +422,10 @@ impl ActivePeers { self.0.remove_with_stable_id(peer_id, stable_id, reason); } + pub fn subscribe(&self) -> broadcast::Receiver { + self.0.subscribe() + } + pub fn is_empty(&self) -> bool { self.0.is_empty() } @@ -522,6 +526,10 @@ impl ActivePeersInner { } } + fn subscribe(&self) -> broadcast::Receiver { + self.events_tx.subscribe() + } + fn send_event(&self, event: PeerEvent) { _ = self.events_tx.send(event); } @@ -558,6 +566,10 @@ impl KnownPeers { Self::default() } + pub fn contains(&self, peer_id: &PeerId) -> bool { + self.0.contains_key(peer_id) + } + pub fn get(&self, peer_id: &PeerId) -> Option { self.0.get(peer_id).map(|item| item.value().clone()) } diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index ebf62235c..0e7e4c58b 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -2,15 +2,15 @@ use std::convert::Infallible; use std::net::{SocketAddr, ToSocketAddrs}; use std::sync::{Arc, Weak}; -use anyhow::Result; +use anyhow::{Context, Result}; use bytes::Bytes; use rand::Rng; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{broadcast, mpsc, oneshot}; use tower::ServiceExt; use crate::config::{Config, EndpointConfig}; use crate::endpoint::Endpoint; -use crate::types::{Address, DisconnectReason, InboundServiceRequest, PeerId, Response}; +use crate::types::{Address, DisconnectReason, InboundServiceRequest, PeerEvent, PeerId, Response}; pub use self::peer::Peer; @@ -190,6 +190,11 @@ impl Network { self.0.known_peers() } + pub fn subscribe(&self) -> Result> { + let active_peers = self.0.active_peers.upgrade().ok_or(NetworkShutdownError)?; + Ok(active_peers.subscribe()) + } + pub async fn connect(&self, addr: T) -> Result where T: Into
<Address>
, @@ -247,7 +252,7 @@ impl NetworkInner { tx, )) .await - .map_err(|_e| anyhow::anyhow!("network has been shutdown"))?; + .map_err(|_e| NetworkShutdownError)?; rx.await? } @@ -270,7 +275,7 @@ impl NetworkInner { self.connection_manager_handle .send(ConnectionManagerRequest::Shutdown(sender)) .await - .map_err(|_e| anyhow::anyhow!("network has been shutdown"))?; + .map_err(|_e| NetworkShutdownError)?; receiver.await.map_err(Into::into) } @@ -279,6 +284,10 @@ impl NetworkInner { } } +#[derive(thiserror::Error, Debug)] +#[error("network has been shutdown")] +struct NetworkShutdownError; + #[cfg(test)] mod tests { use tower::util::BoxCloneService; diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index 9a9989139..3e56d6785 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -17,6 +17,12 @@ pub struct NodeInfo { pub signature: Bytes, } +pub trait WithValue: + TlWrite + for<'a> TlRead<'a, Repr = tl_proto::Boxed> +{ + type Value<'a>: TlWrite + TlRead<'a, Repr = tl_proto::Boxed>; +} + /// Key for values that can only be updated by the owner. #[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = "dht.signedKey", scheme = "proto.tl")] @@ -29,6 +35,10 @@ pub struct SignedKey { pub peer_id: PeerId, } +impl WithValue for SignedKey { + type Value<'a> = SignedValue; +} + /// Key for overlay-managed values. #[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = "dht.overlayKey", scheme = "proto.tl")] @@ -41,6 +51,10 @@ pub struct OverlayKey { pub idx: u32, } +impl WithValue for OverlayKey { + type Value<'a> = OverlayValue; +} + /// Value with a known owner. #[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = "dht.signedValue", scheme = "proto.tl")] diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs new file mode 100644 index 000000000..00cbfb26d --- /dev/null +++ b/network/src/util/mod.rs @@ -0,0 +1,78 @@ +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use anyhow::Result; +use bytes::Bytes; +use futures_util::FutureExt; + +use crate::network::Network; +use crate::types::{PeerEvent, PeerId, Request, Response}; + +pub trait NetworkExt { + fn query(&self, peer_id: &PeerId, request: Request) -> Query; +} + +impl NetworkExt for Network { + fn query(&self, peer_id: &PeerId, request: Request) -> Query { + use tokio::sync::broadcast::error::RecvError; + + let network = self.clone(); + let peer_id = *peer_id; + Query(Box::pin(async move { + let mut peer_events = network.subscribe()?; + + // Make query if already connected + if let Some(peer) = network.peer(&peer_id) { + return peer.rpc(request).await; + } + + match network.known_peers().get(&peer_id) { + // Initiate a connection of it is a known peer + Some(peer_info) => { + network + .connect_with_peer_id(peer_info.address, &peer_id) + .await?; + } + // Error otherwise + None => anyhow::bail!("trying to query an unknown peer: {peer_id}"), + } + + loop { + match peer_events.recv().await { + Ok(PeerEvent::NewPeer(peer_id)) if peer_id == peer_id => { + if let Some(peer) = network.peer(&peer_id) { + return peer.rpc(request).await; + } + } + Ok(_) => {} + Err(RecvError::Closed) => anyhow::bail!("network subscription closed"), + Err(RecvError::Lagged(_)) => { + peer_events = peer_events.resubscribe(); + + if let Some(peer) = network.peer(&peer_id) { + return peer.rpc(request).await; + } + } + } + + anyhow::ensure!( + network.known_peers().contains(&peer_id), + "waiting for a connection to an unknown peer: {peer_id}", + ); + } + })) + } +} + +// TODO: replace with RPITIT +pub 
struct Query(Pin>> + Send + 'static>>); + +impl Future for Query { + type Output = Result>; + + #[inline] + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.0.poll_unpin(cx) + } +} From b8193378d3b4dfdee0d597539a507d6c95f73a9c Mon Sep 17 00:00:00 2001 From: Kirill Mikheev Date: Tue, 30 Jan 2024 12:55:35 +0300 Subject: [PATCH 10/35] network draft --- Cargo.lock | 213 ++++++++++++++++ consensus/Cargo.toml | 14 ++ consensus/src/engine/dag.rs | 1 + consensus/src/engine/mod.rs | 2 + consensus/src/engine/threshold_clock.rs | 1 + consensus/src/intercom/dispatcher.rs | 309 ++++++++++++++++++++++++ consensus/src/intercom/mod.rs | 4 + consensus/src/intercom/receiver.rs | 1 + consensus/src/intercom/responses.rs | 32 +++ consensus/src/intercom/uploader.rs | 1 + consensus/src/lib.rs | 5 +- consensus/src/models.rs | 61 +++++ consensus/src/tasks/broadcaster.rs | 1 + consensus/src/tasks/downloader.rs | 1 + consensus/src/tasks/mod.rs | 4 + consensus/src/tasks/syncer.rs | 1 + consensus/src/tasks/uploader.rs | 1 + network/src/lib.rs | 5 +- 18 files changed, 655 insertions(+), 2 deletions(-) create mode 100644 consensus/src/engine/dag.rs create mode 100644 consensus/src/engine/mod.rs create mode 100644 consensus/src/engine/threshold_clock.rs create mode 100644 consensus/src/intercom/dispatcher.rs create mode 100644 consensus/src/intercom/mod.rs create mode 100644 consensus/src/intercom/receiver.rs create mode 100644 consensus/src/intercom/responses.rs create mode 100644 consensus/src/intercom/uploader.rs create mode 100644 consensus/src/models.rs create mode 100644 consensus/src/tasks/broadcaster.rs create mode 100644 consensus/src/tasks/downloader.rs create mode 100644 consensus/src/tasks/mod.rs create mode 100644 consensus/src/tasks/syncer.rs create mode 100644 consensus/src/tasks/uploader.rs diff --git a/Cargo.lock b/Cargo.lock index f902f4d99..40a9a501a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -117,6 +117,36 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bindgen" +version = "0.65.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +dependencies = [ + "bitflags 1.3.2", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.41", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -155,6 +185,20 @@ name = "bytes" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +dependencies = [ + "serde", +] + +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] [[package]] name = "camino" @@ -193,15 +237,36 @@ version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ + "jobserver", 
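// Hedged usage sketch for the `NetworkExt::query` helper defined above: it reuses an
// existing connection when one is open and otherwise dials the peer from the known
// peers list before sending the request. The exported paths and the payload are
// assumptions based on this patch series, not a definitive API.
async fn ping(
    network: &tycho_network::Network,
    peer_id: &tycho_network::PeerId,
) -> anyhow::Result<()> {
    use tycho_network::util::NetworkExt;

    let request = tycho_network::Request {
        version: tycho_network::Version::V1,
        body: bytes::Bytes::from_static(b"ping"),
    };
    let _response = network.query(peer_id, request).await?;
    Ok(())
}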
"libc", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "clang-sys" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -529,6 +594,15 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +[[package]] +name = "jobserver" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.66" @@ -544,12 +618,56 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +[[package]] +name = "libloading" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "librocksdb-sys" +version = "0.11.0+8.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", + "lz4-sys", + "tikv-jemalloc-sys", + "zstd-sys", +] + +[[package]] +name = "libz-sys" +version = "1.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.4.12" @@ -572,6 +690,16 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +[[package]] +name = "lz4-sys" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "mach2" version = "0.4.1" @@ -765,6 +893,12 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "pem" version = "3.0.3" @@ -862,6 +996,12 @@ dependencies = [ "spki", ] +[[package]] +name = "pkg-config" +version = 
"0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" + [[package]] name = "platforms" version = "3.2.0" @@ -880,6 +1020,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +dependencies = [ + "proc-macro2", + "syn 2.0.41", +] + [[package]] name = "proc-macro2" version = "1.0.70" @@ -1105,6 +1255,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "rocksdb" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +dependencies = [ + "libc", + "librocksdb-sys", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -1261,6 +1421,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signature" version = "2.2.0" @@ -1421,6 +1587,16 @@ dependencies = [ "once_cell", ] +[[package]] +name = "tikv-jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "time" version = "0.3.30" @@ -1669,9 +1845,18 @@ checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" name = "tycho-consensus" version = "0.0.1" dependencies = [ + "anyhow", + "bincode", + "bytes", + "serde", + "tokio", + "tower", + "tracing", + "tracing-test", "tycho-network", "tycho-storage", "tycho-util", + "weedb", ] [[package]] @@ -1798,6 +1983,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.4" @@ -1884,6 +2075,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "weedb" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de8f9c5dfe31e92c6374e25086363a0dccf15cf9b0923ea8a4a2a105d662428e" +dependencies = [ + "librocksdb-sys", + "rocksdb", + "thiserror", + "tracing", +] + [[package]] name = "winapi" version = "0.3.9" @@ -2098,3 +2301,13 @@ name = "zeroize" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" + +[[package]] +name = "zstd-sys" +version = "2.0.9+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 6b2337326..53d247dd2 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -6,8 +6,22 @@ description = "DAG-based consensus for external messages 
queue." [dependencies] # crates.io deps +anyhow = "1.0" +bincode = "1.3" +bytes = { version = "1.0", features = ["serde"] } +serde = { version = "1.0", features = ["derive"] } +tower = "0.4" +tracing = "0.1" +weedb = "0.1" # local deps tycho-network = { path = "../network", version = "=0.0.1" } tycho-storage = { path = "../storage", version = "=0.0.1" } tycho-util = { path = "../util", version = "=0.0.1" } + +[dev-dependencies] +tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +tracing-test = "0.2" + +[lints] +workspace = true \ No newline at end of file diff --git a/consensus/src/engine/dag.rs b/consensus/src/engine/dag.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/engine/dag.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/engine/mod.rs b/consensus/src/engine/mod.rs new file mode 100644 index 000000000..16aee4a36 --- /dev/null +++ b/consensus/src/engine/mod.rs @@ -0,0 +1,2 @@ +mod dag; +mod threshold_clock; diff --git a/consensus/src/engine/threshold_clock.rs b/consensus/src/engine/threshold_clock.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/engine/threshold_clock.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/intercom/dispatcher.rs b/consensus/src/intercom/dispatcher.rs new file mode 100644 index 000000000..6424655f2 --- /dev/null +++ b/consensus/src/intercom/dispatcher.rs @@ -0,0 +1,309 @@ +use std::convert::Infallible; +use std::net::SocketAddr; +use std::str::FromStr; +use std::sync::Arc; + +use anyhow::{anyhow, Result}; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; +use tower::util::BoxCloneService; +use tower::ServiceExt; + +use tycho_network::util::NetworkExt; +use tycho_network::{Config, InboundServiceRequest, Network, Response, Version}; + +use crate::intercom::responses::*; +use crate::models::{Location, Point, PointId, RoundId, Signature}; + +const LOCAL_ADDR: &str = "127.0.0.1:0"; +#[derive(Serialize, Deserialize, Debug)] +enum MPRequest { + // by author + Broadcast { point: Point }, + Point { id: PointId }, + // any point from the last author's round; + // 1/3+1 evidenced vertices determine current consensus round + // PointLast, + // unique point with known evidence + Vertex { id: Location }, + // the next point by the same author + // that contains >=2F signatures for requested vertex + Evidence { vertex_id: Location }, + Vertices { round: RoundId }, +} + +#[derive(Serialize, Deserialize, Debug)] +enum MPResponse { + Broadcast(BroadcastResponse), + Point(PointResponse), + //PointLast(Option), + Vertex(VertexResponse), + Evidence(EvidenceResponse), + Vertices(VerticesResponse), +} + +#[derive(Serialize, Deserialize, Debug)] +enum MPRemoteResult { + Ok(MPResponse), + Err(String), +} + +pub struct Dispatcher { + inner: Arc, + network: Network, +} + +impl Dispatcher { + pub fn new() -> Result { + let inner = Arc::new(DispatcherInner {}); + let handler = inner.clone(); + let service_fn: BoxCloneService, Response, Infallible> = + tower::service_fn(move |a| handler.clone().handle(a)).boxed_clone(); + let network = Network::builder() + .with_config(Config::default()) + .with_random_private_key() + .with_service_name("tycho-mempool-router") + .build(LOCAL_ADDR, service_fn)?; + Ok(Self { inner, network }) + } + + pub async fn broadcast(&self, point: Point, from: SocketAddr) -> Result { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Broadcast { point })?), + }; + + let remote_peer = 
self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Broadcast(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } + + pub async fn point(&self, id: PointId, from: SocketAddr) -> Result { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Point { id })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Point(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } + + pub async fn vertex(&self, id: Location, from: SocketAddr) -> Result { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Vertex { id })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Vertex(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } + + pub async fn evidence( + &self, + vertex_id: Location, + from: SocketAddr, + ) -> Result { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Evidence { vertex_id })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Evidence(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } + + pub async fn vertices(&self, round: RoundId, from: SocketAddr) -> Result { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Vertices { round })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Vertices(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } +} + +struct DispatcherInner { + // state and storage components go here +} + +impl DispatcherInner { + async fn handle( + self: Arc, + request: InboundServiceRequest, + ) -> Result, Infallible> { + let result = match bincode::deserialize::(&request.body) { + Ok(request_body) => { + let result: Result = match &request_body { + MPRequest::Broadcast { point } => { + // 1.1 sigs for my block + 1.2 my next includes + // ?? 
+ 3.1 ask last + Ok(MPResponse::Broadcast(BroadcastResponse { + current_round: RoundId(0), + signature: Signature(Bytes::new()), + signer_point: None, + })) + } + MPRequest::Point { id } => { + // 1.2 my next includes (merged with Broadcast flow) + Ok(MPResponse::Point(PointResponse { + current_round: RoundId(0), + point: None, + })) + } + MPRequest::Vertex { id } => { + // verification flow: downloader + Ok(MPResponse::Vertex(VertexResponse { + current_round: RoundId(0), + vertex: None, + })) + } + MPRequest::Evidence { vertex_id } => { + // verification flow: downloader + Ok(MPResponse::Evidence(EvidenceResponse { + current_round: RoundId(0), + point: None, + })) + } + MPRequest::Vertices { round } => { + // cold sync flow: downloader + Ok(MPResponse::Vertices(VerticesResponse { + vertices: Vec::new(), + })) + } + }; + result + .map(|r| MPRemoteResult::Ok(r)) + .map_err(|e| { + let msg = format!("{e:?}"); + tracing::error!( + "failed to process request {:?} from {:?}: {msg}", + request_body, + request.metadata.as_ref() + ); + MPRemoteResult::Err(format!("remote exception in execution: {msg}")) + }) + .unwrap() + } + Err(e) => { + let msg = format!("{e:?}"); + tracing::warn!( + "unexpected request from {:?}: {msg}", + request.metadata.as_ref() + ); + MPRemoteResult::Err(format!( + "remote exception on request deserialization: {msg}" + )) + } + }; + + let body = bincode::serialize(&result) + .map(Bytes::from) + .unwrap_or_else(|err| { + let msg = format!("{err:?}"); + tracing::error!( + "cannot serialize response to {:?}: {msg}; data: {result:?}", + request.metadata.as_ref() + ); + bincode::serialize(&MPRemoteResult::Err(format!( + "remote exception on response serialization: {msg}" + ))) + .map(Bytes::from) + // empty body denotes a failure during serialization of error serialization, unlikely to happen + .unwrap_or(Bytes::new()) + }); + + let response = Response { + version: Version::default(), + body, + }; + Ok::<_, Infallible>(response) + } +} + +fn parse_response(body: &Bytes) -> anyhow::Result { + if body.is_empty() { + return Err(anyhow::Error::msg( + "remote response serialization exception is hidden by exception during serialization", + )); + } + match bincode::deserialize::(body) { + Ok(MPRemoteResult::Ok(response)) => Ok(response), + Ok(MPRemoteResult::Err(e)) => Err(anyhow::Error::msg(e)), + Err(e) => Err(anyhow!("failed to deserialize response: {e:?}")), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + #[tracing_test::traced_test] + async fn underlying_network_works() -> Result<()> { + let node1 = Dispatcher::new()?.network; + let node2 = Dispatcher::new()?.network; + + let peer2 = node1.connect(node2.local_addr()).await?; + let response = node1 + .query( + &peer2, + tycho_network::Request { + version: Version::V1, + body: Bytes::from("bites"), + }, + ) + .await?; + let response = parse_response(&response.body); + + tracing::info!("response '{response:?}'"); + + assert!(response.is_err()); + Ok(()) + } + + #[tokio::test] + #[tracing_test::traced_test] + async fn dispatcher_works() -> Result<()> { + let node1 = Dispatcher::new()?; + let node2 = Dispatcher::new()?; + + let data = node1 + .vertices(RoundId(0), node2.network.local_addr()) + .await?; + + tracing::info!("response: '{data:?}'"); + + assert!(data.vertices.is_empty()); + Ok(()) + } +} diff --git a/consensus/src/intercom/mod.rs b/consensus/src/intercom/mod.rs new file mode 100644 index 000000000..736e764d3 --- /dev/null +++ b/consensus/src/intercom/mod.rs @@ -0,0 +1,4 @@ +mod dispatcher; +mod 
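// Self-contained sketch of the wire format the dispatcher above relies on: requests
// and responses are bincode-encoded serde enums carried as raw bytes. `DemoRequest`
// is a stand-in for the crate's `MPRequest`, not the actual type.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum DemoRequest {
    Vertices { round: u32 },
}

fn demo_round_trip() -> Result<(), bincode::Error> {
    let encoded = bincode::serialize(&DemoRequest::Vertices { round: 0 })?;
    let decoded: DemoRequest = bincode::deserialize(&encoded)?;
    assert_eq!(decoded, DemoRequest::Vertices { round: 0 });
    Ok(())
}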
receiver; +mod responses; +mod uploader; diff --git a/consensus/src/intercom/receiver.rs b/consensus/src/intercom/receiver.rs new file mode 100644 index 000000000..85b0baca4 --- /dev/null +++ b/consensus/src/intercom/receiver.rs @@ -0,0 +1 @@ +pub struct Receiver {} diff --git a/consensus/src/intercom/responses.rs b/consensus/src/intercom/responses.rs new file mode 100644 index 000000000..1eb2c18c7 --- /dev/null +++ b/consensus/src/intercom/responses.rs @@ -0,0 +1,32 @@ +use serde::{Deserialize, Serialize}; + +use crate::models::{Point, RoundId, Signature}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct BroadcastResponse { + pub current_round: RoundId, + // for requested point + pub signature: Signature, + // at the same round, if it was not skipped + pub signer_point: Option, +} +#[derive(Serialize, Deserialize, Debug)] +pub struct PointResponse { + pub current_round: RoundId, + pub point: Option, +} +//PointLast(Option), +#[derive(Serialize, Deserialize, Debug)] +pub struct VertexResponse { + pub current_round: RoundId, + pub vertex: Option, +} +#[derive(Serialize, Deserialize, Debug)] +pub struct EvidenceResponse { + pub current_round: RoundId, + pub point: Option, +} +#[derive(Serialize, Deserialize, Debug)] +pub struct VerticesResponse { + pub vertices: Vec, +} diff --git a/consensus/src/intercom/uploader.rs b/consensus/src/intercom/uploader.rs new file mode 100644 index 000000000..1d4b54f75 --- /dev/null +++ b/consensus/src/intercom/uploader.rs @@ -0,0 +1 @@ +pub struct Uploader {} diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 8b1378917..a43c4473e 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -1 +1,4 @@ - +mod engine; +mod intercom; +mod models; +mod tasks; diff --git a/consensus/src/models.rs b/consensus/src/models.rs new file mode 100644 index 000000000..2c9ea00df --- /dev/null +++ b/consensus/src/models.rs @@ -0,0 +1,61 @@ +use bytes::Bytes; +use serde::{Deserialize, Serialize}; +use tycho_network::FastHashMap; + +pub const POINT_DIGEST_SIZE: usize = 32; +pub const SIGNATURE_SIZE: usize = 64; + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct Digest(pub Bytes); +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct Signature(pub Bytes); +#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Debug)] +pub struct NodeId(pub u8); +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct RoundId(pub u32); + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct Location { + round: RoundId, + author: NodeId, +} + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct PointId { + location: Location, + digest: Digest, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct PrevPoint { + round: RoundId, + digest: Digest, + // >= 2F witnesses, point author excluded + evidence: FastHashMap, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct PointData { + location: Location, + local_time: u64, + payload: Vec, + // >= 2F+1 vertices from the round before last, + // optionally including author's own vertex + includes: FastHashMap, + anchor: PointId, + proposed_leader: Option, + // any vertices the leader adds to its diff-graph + // beyond its direct inclusions + leader_deep_includes: Vec, + // of the same author + prev_point: Option, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct Point { + data: PointData, + // author's + signature: Signature, + // of both data and author's signature + digest: Digest, +} diff --git a/consensus/src/tasks/broadcaster.rs 
b/consensus/src/tasks/broadcaster.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/tasks/broadcaster.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/tasks/downloader.rs b/consensus/src/tasks/downloader.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/tasks/downloader.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/tasks/mod.rs b/consensus/src/tasks/mod.rs new file mode 100644 index 000000000..0926bcb19 --- /dev/null +++ b/consensus/src/tasks/mod.rs @@ -0,0 +1,4 @@ +mod broadcaster; +mod downloader; +mod syncer; +mod uploader; diff --git a/consensus/src/tasks/syncer.rs b/consensus/src/tasks/syncer.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/tasks/syncer.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/tasks/uploader.rs b/consensus/src/tasks/uploader.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/tasks/uploader.rs @@ -0,0 +1 @@ + diff --git a/network/src/lib.rs b/network/src/lib.rs index 140983c31..a04961655 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,7 +1,10 @@ pub use config::{Config, QuicConfig}; pub use dht::Dht; pub use network::{Network, NetworkBuilder, Peer, WeakNetwork}; -pub use types::{Address, AddressList, Direction, PeerId, Request, Response, RpcQuery, Version}; +pub use types::{ + Address, AddressList, Direction, FastDashMap, FastHashMap, InboundServiceRequest, PeerId, + Request, Response, RpcQuery, Version, +}; mod config; mod connection; From 083b23c07bf353b88fb95acad334a88da832a359 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Thu, 8 Feb 2024 18:37:21 +0100 Subject: [PATCH 11/35] network: Replace tower service with separate handlers --- Cargo.lock | 385 ++++++++----------- consensus/Cargo.toml | 4 +- consensus/src/intercom/dispatcher.rs | 160 ++++---- consensus/src/models.rs | 2 +- network/Cargo.toml | 1 - network/src/dht/mod.rs | 31 +- network/src/dht/storage.rs | 2 +- network/src/lib.rs | 6 +- network/src/network/connection_manager.rs | 11 +- network/src/network/mod.rs | 25 +- network/src/network/request_handler.rs | 47 ++- network/src/types/mod.rs | 9 +- network/src/types/service.rs | 430 ++++++++++++++++++++++ rust-toolchain | 1 - util/Cargo.toml | 4 +- util/src/lib.rs | 5 + 16 files changed, 741 insertions(+), 382 deletions(-) create mode 100644 network/src/types/service.rs delete mode 100644 rust-toolchain diff --git a/Cargo.lock b/Cargo.lock index 40a9a501a..3a58eba4a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "getrandom", @@ -41,9 +41,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "asn1-rs" @@ -107,9 +107,9 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -144,7 +144,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -155,9 +155,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "block-buffer" @@ -211,9 +211,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff" +checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" dependencies = [ "serde", ] @@ -275,9 +275,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -299,34 +299,27 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crossbeam-channel" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.16" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", - "memoffset", ] [[package]] name = "crossbeam-utils" -version = "0.8.17" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crypto-common" @@ -340,9 +333,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", @@ -362,7 +355,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -410,9 +403,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +checksum = 
"b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -435,7 +428,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -489,44 +482,44 @@ checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fiat-crypto" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" +checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-core", "futures-macro", @@ -549,9 +542,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "libc", @@ -578,9 +571,9 @@ checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "d0c62115964e08cb8039170eb33c1d0e2388a256930279edca206fff675f82c3" [[package]] name = "hex" @@ -596,18 +589,18 @@ checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.66" +version = 
"0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ "wasm-bindgen", ] @@ -626,9 +619,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.151" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" @@ -670,9 +663,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -700,15 +693,6 @@ dependencies = [ "libc", ] -[[package]] -name = "mach2" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" -dependencies = [ - "libc", -] - [[package]] name = "matchers" version = "0.1.0" @@ -720,18 +704,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" - -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "minimal-lexical" @@ -741,9 +716,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] @@ -761,9 +736,9 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.1" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8017ec3548ffe7d4cef7ac0e12b044c01164a74c0f3119420faeaf13490ad8b" +checksum = "b1911e88d5831f748a4097a43862d129e3c6fca831eecac9b8db6d01d93c9de2" dependencies = [ "crossbeam-channel", "crossbeam-epoch", @@ -811,21 +786,26 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] @@ -842,9 +822,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -911,9 +891,9 @@ dependencies = [ [[package]] name = "pest" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5" +checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" dependencies = [ "memchr", "thiserror", @@ -922,9 +902,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2" +checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" dependencies = [ "pest", "pest_generator", @@ -932,48 +912,28 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227" +checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "pest_meta" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6" +checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" dependencies = [ "once_cell", "pest", "sha2", ] -[[package]] -name = "pin-project" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.41", -] - [[package]] name = "pin-project-lite" version = "0.2.13" @@ -1004,9 +964,9 @@ checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" [[package]] name = "platforms" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "powerfmt" @@ -1022,43 +982,42 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "proc-macro2" -version = 
"1.0.70" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] [[package]] name = "pulldown-cmark" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", "memchr", "unicase", ] [[package]] name = "quanta" -version = "0.11.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" dependencies = [ "crossbeam-utils", "libc", - "mach2", "once_cell", "raw-cpuid", "wasi", @@ -1115,9 +1074,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -1154,11 +1113,11 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.7.0" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", ] [[package]] @@ -1184,13 +1143,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.5", "regex-syntax 0.8.2", ] @@ -1205,9 +1164,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -1297,11 +1256,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "errno", "libc", "linux-raw-sys", @@ -1363,38 +1322,38 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" dependencies = [ "serde", ] [[package]] name = "serde" -version = "1.0.193" +version = "1.0.196" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", @@ -1462,9 +1421,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" @@ -1517,9 +1476,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.41" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -1546,35 +1505,34 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" [[package]] name = "tempfile" -version = "3.8.1" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if", "fastrand", - "redox_syscall", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "thiserror" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1599,12 +1557,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.30" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -1619,10 +1578,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" dependencies = [ + "num-conv", "time-core", ] @@ -1664,7 +1624,7 @@ dependencies = [ "proc-macro2", "quote", "rustc-hash", - "syn 2.0.41", + "syn 2.0.48", "tl-scheme", ] @@ -1683,9 +1643,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.35.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841d45b238a16291a4e1584e61820b8ae57d696cc5015c459c229ccc6990cc1c" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -1706,7 +1666,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1723,40 +1683,12 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-layer" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -1770,7 +1702,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1848,9 +1780,9 @@ dependencies = [ "anyhow", "bincode", "bytes", + "futures-util", "serde", "tokio", - "tower", "tracing", "tracing-test", "tycho-network", @@ -1895,7 +1827,6 @@ dependencies = [ "tl-proto", "tokio", "tokio-util", - "tower", "tracing", "tracing-test", "tycho-util", @@ -1912,6 +1843,10 @@ dependencies = [ [[package]] name = "tycho-util" version = "0.0.1" +dependencies = [ + "ahash", + "dashmap", +] [[package]] name = "tycho-validator" @@ -1970,9 +1905,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "uuid" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" dependencies = [ "getrandom", ] @@ -2013,9 +1948,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.89" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -2023,24 +1958,24 @@ 
dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.89" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.89" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2048,28 +1983,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.89" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.89" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "web-sys" -version = "0.3.66" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" dependencies = [ "js-sys", "wasm-bindgen", @@ -2278,22 +2213,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.31" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.31" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 53d247dd2..ba9255877 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -9,8 +9,8 @@ description = "DAG-based consensus for external messages queue." 
anyhow = "1.0" bincode = "1.3" bytes = { version = "1.0", features = ["serde"] } +futures-util = { version = "0.3" } serde = { version = "1.0", features = ["derive"] } -tower = "0.4" tracing = "0.1" weedb = "0.1" @@ -24,4 +24,4 @@ tokio = { version = "1", features = ["rt-multi-thread", "macros"] } tracing-test = "0.2" [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/consensus/src/intercom/dispatcher.rs b/consensus/src/intercom/dispatcher.rs index 6424655f2..775065f47 100644 --- a/consensus/src/intercom/dispatcher.rs +++ b/consensus/src/intercom/dispatcher.rs @@ -1,21 +1,16 @@ -use std::convert::Infallible; -use std::net::SocketAddr; -use std::str::FromStr; +use std::net::{Ipv4Addr, SocketAddr}; use std::sync::Arc; use anyhow::{anyhow, Result}; use bytes::Bytes; use serde::{Deserialize, Serialize}; -use tower::util::BoxCloneService; -use tower::ServiceExt; use tycho_network::util::NetworkExt; -use tycho_network::{Config, InboundServiceRequest, Network, Response, Version}; +use tycho_network::{service_query_fn, Config, InboundServiceRequest, Network, Response, Version}; use crate::intercom::responses::*; use crate::models::{Location, Point, PointId, RoundId, Signature}; -const LOCAL_ADDR: &str = "127.0.0.1:0"; #[derive(Serialize, Deserialize, Debug)] enum MPRequest { // by author @@ -56,14 +51,17 @@ pub struct Dispatcher { impl Dispatcher { pub fn new() -> Result { let inner = Arc::new(DispatcherInner {}); - let handler = inner.clone(); - let service_fn: BoxCloneService, Response, Infallible> = - tower::service_fn(move |a| handler.clone().handle(a)).boxed_clone(); + let service_fn = service_query_fn({ + let inner = inner.clone(); + move |req| inner.clone().handle(req) + }); + let network = Network::builder() .with_config(Config::default()) .with_random_private_key() .with_service_name("tycho-mempool-router") - .build(LOCAL_ADDR, service_fn)?; + .build((Ipv4Addr::LOCALHOST, 0), service_fn)?; + Ok(Self { inner, network }) } @@ -157,96 +155,66 @@ struct DispatcherInner { } impl DispatcherInner { - async fn handle( - self: Arc, - request: InboundServiceRequest, - ) -> Result, Infallible> { - let result = match bincode::deserialize::(&request.body) { - Ok(request_body) => { - let result: Result = match &request_body { - MPRequest::Broadcast { point } => { - // 1.1 sigs for my block + 1.2 my next includes - // ?? 
+ 3.1 ask last - Ok(MPResponse::Broadcast(BroadcastResponse { - current_round: RoundId(0), - signature: Signature(Bytes::new()), - signer_point: None, - })) - } - MPRequest::Point { id } => { - // 1.2 my next includes (merged with Broadcast flow) - Ok(MPResponse::Point(PointResponse { - current_round: RoundId(0), - point: None, - })) - } - MPRequest::Vertex { id } => { - // verification flow: downloader - Ok(MPResponse::Vertex(VertexResponse { - current_round: RoundId(0), - vertex: None, - })) - } - MPRequest::Evidence { vertex_id } => { - // verification flow: downloader - Ok(MPResponse::Evidence(EvidenceResponse { - current_round: RoundId(0), - point: None, - })) - } - MPRequest::Vertices { round } => { - // cold sync flow: downloader - Ok(MPResponse::Vertices(VerticesResponse { - vertices: Vec::new(), - })) - } - }; - result - .map(|r| MPRemoteResult::Ok(r)) - .map_err(|e| { - let msg = format!("{e:?}"); - tracing::error!( - "failed to process request {:?} from {:?}: {msg}", - request_body, - request.metadata.as_ref() - ); - MPRemoteResult::Err(format!("remote exception in execution: {msg}")) - }) - .unwrap() - } + async fn handle(self: Arc, req: InboundServiceRequest) -> Option> { + let body = match bincode::deserialize::(&req.body) { + Ok(body) => body, Err(e) => { - let msg = format!("{e:?}"); - tracing::warn!( - "unexpected request from {:?}: {msg}", - request.metadata.as_ref() - ); - MPRemoteResult::Err(format!( - "remote exception on request deserialization: {msg}" - )) + tracing::error!("unexpected request from {:?}: {e:?}", req.metadata); + // NOTE: malformed request is a reason to ignore it + return None; } }; - let body = bincode::serialize(&result) - .map(Bytes::from) - .unwrap_or_else(|err| { - let msg = format!("{err:?}"); - tracing::error!( - "cannot serialize response to {:?}: {msg}; data: {result:?}", - request.metadata.as_ref() - ); - bincode::serialize(&MPRemoteResult::Err(format!( - "remote exception on response serialization: {msg}" - ))) - .map(Bytes::from) - // empty body denotes a failure during serialization of error serialization, unlikely to happen - .unwrap_or(Bytes::new()) - }); - - let response = Response { - version: Version::default(), - body, + let response = match body { + MPRequest::Broadcast { point } => { + // 1.1 sigs for my block + 1.2 my next includes + // ?? 
+ 3.1 ask last + MPResponse::Broadcast(BroadcastResponse { + current_round: RoundId(0), + signature: Signature(Bytes::new()), + signer_point: None, + }) + } + MPRequest::Point { id } => { + // 1.2 my next includes (merged with Broadcast flow) + MPResponse::Point(PointResponse { + current_round: RoundId(0), + point: None, + }) + } + MPRequest::Vertex { id } => { + // verification flow: downloader + MPResponse::Vertex(VertexResponse { + current_round: RoundId(0), + vertex: None, + }) + } + MPRequest::Evidence { vertex_id } => { + // verification flow: downloader + MPResponse::Evidence(EvidenceResponse { + current_round: RoundId(0), + point: None, + }) + } + MPRequest::Vertices { round } => { + // cold sync flow: downloader + MPResponse::Vertices(VerticesResponse { + vertices: Vec::new(), + }) + } }; - Ok::<_, Infallible>(response) + + Some(Response { + version: Version::default(), + body: Bytes::from(match bincode::serialize(&response) { + Ok(data) => data, + Err(e) => { + tracing::error!("failed to serialize response to {:?}: {e:?}", req.metadata); + bincode::serialize(&MPRemoteResult::Err(format!("internal error"))) + .expect("must not fail") + } + }), + }) } } diff --git a/consensus/src/models.rs b/consensus/src/models.rs index 2c9ea00df..b8ab3bff5 100644 --- a/consensus/src/models.rs +++ b/consensus/src/models.rs @@ -1,6 +1,6 @@ use bytes::Bytes; use serde::{Deserialize, Serialize}; -use tycho_network::FastHashMap; +use tycho_util::FastHashMap; pub const POINT_DIGEST_SIZE: usize = 32; pub const SIGNATURE_SIZE: usize = 64; diff --git a/network/Cargo.toml b/network/Cargo.toml index 00c31f74a..a35b7890e 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -30,7 +30,6 @@ thiserror = "1" tl-proto = "0.4" tokio = { version = "1", features = ["rt", "sync", "io-util", "macros"] } tokio-util = { version = "0.7", features = ["codec"] } -tower = { version = "0.4", features = ["util"] } tracing = "0.1" x509-parser = "0.15" diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index afb9ded51..e792d5159 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -2,12 +2,14 @@ use std::sync::{Arc, Mutex}; use std::time::Instant; use anyhow::Result; +use bytes::Bytes; +use futures_util::future::BoxFuture; use self::routing::RoutingTable; use self::storage::Storage; use crate::network::WeakNetwork; -use crate::proto; -use crate::types::PeerId; +use crate::types::{PeerId, Response, Service}; +use crate::{proto, InboundServiceRequest}; mod routing; mod storage; @@ -30,7 +32,30 @@ impl Dht { struct DhtInner { local_id: PeerId, routing_table: Mutex, - last_table_refersh: Instant, + last_table_refresh: Instant, storage: Storage, network: WeakNetwork, } + +impl Service> for Dht { + type QueryResponse = Response; + type OnQueryFuture = BoxFuture<'static, Option>; + type OnMessageFuture = futures_util::future::Ready<()>; + type OnDatagramFuture = futures_util::future::Ready<()>; + + fn on_query(&mut self, req: InboundServiceRequest) -> Self::OnQueryFuture { + // TODO: parse query and dispatch to appropriate method + + todo!() + } + + #[inline] + fn on_message(&mut self, req: InboundServiceRequest) -> Self::OnMessageFuture { + futures_util::future::ready(()) + } + + #[inline] + fn on_datagram(&mut self, req: InboundServiceRequest) -> Self::OnDatagramFuture { + futures_util::future::ready(()) + } +} diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index c150f9df9..3a3ecab55 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs @@ -13,7 +13,7 @@ use 
crate::proto; type DhtCache = Cache; type DhtCacheBuilder = CacheBuilder>; -pub trait OverlayValueMerger { +pub trait OverlayValueMerger: Send + Sync + 'static { fn check_value(&self, new: &proto::dht::OverlayValue) -> Result<(), StorageError>; fn merge_value( diff --git a/network/src/lib.rs b/network/src/lib.rs index a04961655..5eb184862 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -2,8 +2,10 @@ pub use config::{Config, QuicConfig}; pub use dht::Dht; pub use network::{Network, NetworkBuilder, Peer, WeakNetwork}; pub use types::{ - Address, AddressList, Direction, FastDashMap, FastHashMap, InboundServiceRequest, PeerId, - Request, Response, RpcQuery, Version, + service_datagram_fn, service_message_fn, service_query_fn, Address, AddressList, + BoxCloneService, BoxService, Direction, DisconnectReason, InboundRequestMeta, + InboundServiceRequest, PeerId, Request, Response, RpcQuery, Service, ServiceDatagramFn, + ServiceExt, ServiceMessageFn, ServiceQueryFn, Version, }; mod config; diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index dad4f41bf..e133c7ebd 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -1,4 +1,3 @@ -use std::convert::Infallible; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Weak}; use std::time::{Duration, Instant}; @@ -8,7 +7,7 @@ use anyhow::Result; use bytes::Bytes; use tokio::sync::{broadcast, mpsc, oneshot}; use tokio::task::JoinSet; -use tower::util::BoxCloneService; +use tycho_util::{FastDashMap, FastHashMap}; use super::request_handler::InboundRequestHandler; use super::wire::handshake; @@ -16,8 +15,8 @@ use crate::config::Config; use crate::connection::Connection; use crate::endpoint::{Connecting, Endpoint}; use crate::types::{ - Address, Direction, DisconnectReason, FastDashMap, FastHashMap, InboundServiceRequest, - PeerAffinity, PeerEvent, PeerId, PeerInfo, Response, + Address, BoxCloneService, Direction, DisconnectReason, InboundServiceRequest, PeerAffinity, + PeerEvent, PeerId, PeerInfo, Response, }; #[derive(Debug)] @@ -41,7 +40,7 @@ pub struct ConnectionManager { active_peers: ActivePeers, known_peers: KnownPeers, - service: BoxCloneService, Response, Infallible>, + service: BoxCloneService, Response>, } impl Drop for ConnectionManager { @@ -56,7 +55,7 @@ impl ConnectionManager { endpoint: Arc, active_peers: ActivePeers, known_peers: KnownPeers, - service: BoxCloneService, Response, Infallible>, + service: BoxCloneService, Response>, ) -> (Self, mpsc::Sender) { let (mailbox_tx, mailbox) = mpsc::channel(config.connection_manager_channel_capacity); let connection_manager = Self { diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index 0e7e4c58b..f8e76c46f 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -1,16 +1,17 @@ -use std::convert::Infallible; use std::net::{SocketAddr, ToSocketAddrs}; use std::sync::{Arc, Weak}; -use anyhow::{Context, Result}; +use anyhow::Result; use bytes::Bytes; use rand::Rng; use tokio::sync::{broadcast, mpsc, oneshot}; -use tower::ServiceExt; use crate::config::{Config, EndpointConfig}; use crate::endpoint::Endpoint; -use crate::types::{Address, DisconnectReason, InboundServiceRequest, PeerEvent, PeerId, Response}; +use crate::types::{ + Address, DisconnectReason, InboundServiceRequest, PeerEvent, PeerId, Response, Service, + ServiceExt, +}; pub use self::peer::Peer; @@ -68,12 +69,7 @@ impl NetworkBuilder { pub fn build(self, bind_address: T, 
service: S) -> Result where S: Clone + Send + 'static, - S: tower::Service< - InboundServiceRequest, - Response = Response, - Error = Infallible, - >, - >>::Future: Send + 'static, + S: Service, QueryResponse = Response>, { use socket2::{Domain, Protocol, Socket, Type}; @@ -290,22 +286,21 @@ struct NetworkShutdownError; #[cfg(test)] mod tests { - use tower::util::BoxCloneService; use tracing_test::traced_test; use super::*; + use crate::types::{service_query_fn, BoxCloneService}; - fn echo_service() -> BoxCloneService, Response, Infallible> - { + fn echo_service() -> BoxCloneService, Response> { let handle = |request: InboundServiceRequest| async move { tracing::trace!("received: {}", request.body.escape_ascii()); let response = Response { version: Default::default(), body: request.body, }; - Ok::<_, Infallible>(response) + Some(response) }; - tower::service_fn(handle).boxed_clone() + service_query_fn(handle).boxed_clone() } #[tokio::test] diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs index d294ae0e7..1188d47ba 100644 --- a/network/src/network/request_handler.rs +++ b/network/src/network/request_handler.rs @@ -1,4 +1,3 @@ -use std::convert::Infallible; use std::sync::Arc; use anyhow::Result; @@ -6,18 +5,19 @@ use bytes::Bytes; use quinn::RecvStream; use tokio::task::JoinSet; use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec}; -use tower::util::{BoxCloneService, ServiceExt}; use super::connection_manager::ActivePeers; use super::wire::{make_codec, recv_request, send_response}; use crate::config::Config; use crate::connection::{Connection, SendStream}; -use crate::types::{DisconnectReason, InboundRequestMeta, InboundServiceRequest, Response}; +use crate::types::{ + BoxCloneService, DisconnectReason, InboundRequestMeta, InboundServiceRequest, Response, Service, +}; pub struct InboundRequestHandler { config: Arc, connection: Connection, - service: BoxCloneService, Response, Infallible>, + service: BoxCloneService, Response>, active_peers: ActivePeers, } @@ -25,7 +25,7 @@ impl InboundRequestHandler { pub fn new( config: Arc, connection: Connection, - service: BoxCloneService, Response, Infallible>, + service: BoxCloneService, Response>, active_peers: ActivePeers, ) -> Self { Self { @@ -100,7 +100,7 @@ impl InboundRequestHandler { struct BiStreamRequestHandler { meta: Arc, - service: BoxCloneService, Response, Infallible>, + service: BoxCloneService, Response>, send_stream: FramedWrite, recv_stream: FramedRead, } @@ -109,7 +109,7 @@ impl BiStreamRequestHandler { fn new( config: &Config, meta: Arc, - service: BoxCloneService, Response, Infallible>, + service: BoxCloneService, Response>, send_stream: SendStream, recv_stream: RecvStream, ) -> Self { @@ -129,22 +129,21 @@ impl BiStreamRequestHandler { async fn do_handle(mut self) -> Result<()> { let req = recv_request(&mut self.recv_stream).await?; - let res = { - let handler = self.service.oneshot(InboundServiceRequest { - metadata: self.meta, - body: req.body, - }); - - let stopped = self.send_stream.get_mut().stopped(); - tokio::select! { - res = handler => res.expect("infallible always succeeds"), - _ = stopped => anyhow::bail!("send_stream closed by remote"), - } - }; - - send_response(&mut self.send_stream, res).await?; - self.send_stream.get_mut().finish().await?; - - Ok(()) + let handler = self.service.on_query(InboundServiceRequest { + metadata: self.meta, + body: req.body, + }); + + let stopped = self.send_stream.get_mut().stopped(); + tokio::select! 
{ + res = handler => { + if let Some(res) = res { + send_response(&mut self.send_stream, res).await?; + } + self.send_stream.get_mut().finish().await?; + Ok(()) + }, + _ = stopped => anyhow::bail!("send_stream closed by remote"), + } } } diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index 9f8fbac2b..8ac478235 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -1,17 +1,18 @@ -use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; pub use self::address::{Address, AddressList}; pub use self::peer_id::{Direction, PeerId}; pub use self::rpc::RpcQuery; +pub use self::service::{ + service_datagram_fn, service_message_fn, service_query_fn, BoxCloneService, BoxService, + Service, ServiceDatagramFn, ServiceExt, ServiceMessageFn, ServiceQueryFn, +}; mod address; mod peer_id; mod rpc; - -pub type FastDashMap = dashmap::DashMap; -pub type FastHashMap = HashMap; +mod service; #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] #[repr(u16)] diff --git a/network/src/types/service.rs b/network/src/types/service.rs new file mode 100644 index 000000000..3cd6aace9 --- /dev/null +++ b/network/src/types/service.rs @@ -0,0 +1,430 @@ +use std::future::Future; +use std::marker::PhantomData; + +use futures_util::future::BoxFuture; + +pub trait Service: Send { + type QueryResponse: Send + 'static; + type OnQueryFuture: Future> + Send + 'static; + type OnMessageFuture: Future + Send + 'static; + type OnDatagramFuture: Future + Send + 'static; + + /// Called when a query is received. + /// + /// Returns a future that resolves to the either response to the query if `Some`, + /// or cancellation of the query if `None`. + fn on_query(&mut self, req: Request) -> Self::OnQueryFuture; + + /// Called when a message is received. + fn on_message(&mut self, req: Request) -> Self::OnMessageFuture; + + /// Called when a datagram is received. 
+ fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture; +} + +pub trait ServiceExt: Service { + #[inline] + fn boxed(self) -> BoxService + where + Self: Sized + Send + 'static, + Self::OnQueryFuture: Send + 'static, + Self::OnMessageFuture: Send + 'static, + Self::OnDatagramFuture: Send + 'static, + { + BoxService::new(self) + } + + #[inline] + fn boxed_clone(self) -> BoxCloneService + where + Self: Clone + Sized + Send + 'static, + Self::OnQueryFuture: Send + 'static, + Self::OnMessageFuture: Send + 'static, + Self::OnDatagramFuture: Send + 'static, + { + BoxCloneService::new(self) + } +} + +impl ServiceExt for T where T: Service + Send + ?Sized {} + +impl<'a, S, Request> Service for &'a mut S +where + S: Service + 'a, +{ + type QueryResponse = S::QueryResponse; + type OnQueryFuture = S::OnQueryFuture; + type OnMessageFuture = S::OnMessageFuture; + type OnDatagramFuture = S::OnDatagramFuture; + + #[inline] + fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + >::on_query(*self, req) + } + + #[inline] + fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + >::on_message(*self, req) + } + + #[inline] + fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + >::on_datagram(*self, req) + } +} + +impl Service for Box +where + S: Service + ?Sized, +{ + type QueryResponse = S::QueryResponse; + type OnQueryFuture = S::OnQueryFuture; + type OnMessageFuture = S::OnMessageFuture; + type OnDatagramFuture = S::OnDatagramFuture; + + #[inline] + fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + >::on_query(self.as_mut(), req) + } + + #[inline] + fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + >::on_message(self.as_mut(), req) + } + + #[inline] + fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + >::on_datagram(self.as_mut(), req) + } +} + +#[repr(transparent)] +pub struct BoxService { + inner: Box< + dyn Service< + Request, + QueryResponse = Q, + OnQueryFuture = BoxFuture<'static, Option>, + OnMessageFuture = BoxFuture<'static, ()>, + OnDatagramFuture = BoxFuture<'static, ()>, + > + Send, + >, +} + +impl BoxService { + pub fn new(inner: S) -> Self + where + S: Service + Send + 'static, + S::OnQueryFuture: Send + 'static, + S::OnMessageFuture: Send + 'static, + S::OnDatagramFuture: Send + 'static, + { + BoxService { + inner: Box::new(BoxPinFutures(inner)), + } + } +} + +impl Service for BoxService +where + Request: Send + 'static, + Q: Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = BoxFuture<'static, Option>; + type OnMessageFuture = BoxFuture<'static, ()>; + type OnDatagramFuture = BoxFuture<'static, ()>; + + #[inline] + fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + self.inner.on_query(req) + } + + #[inline] + fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + self.inner.on_message(req) + } + + #[inline] + fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + self.inner.on_datagram(req) + } +} + +#[repr(transparent)] +pub struct BoxCloneService { + inner: Box< + dyn CloneService< + Request, + QueryResponse = Q, + OnQueryFuture = BoxFuture<'static, Option>, + OnMessageFuture = BoxFuture<'static, ()>, + OnDatagramFuture = BoxFuture<'static, ()>, + > + Send, + >, +} + +impl BoxCloneService +where + Q: Send + 'static, +{ + pub fn new(inner: S) -> Self + where + S: Service + Clone + Send + 'static, + S::OnQueryFuture: Send + 'static, + S::OnMessageFuture: Send + 'static, + S::OnDatagramFuture: Send + 'static, + { + BoxCloneService { 
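The boxed wrappers above are normally obtained through the `ServiceExt::boxed`/`boxed_clone` helpers rather than constructed directly. A minimal sketch of erasing a query-only handler before handing it to the connection manager, using only items this patch re-exports from `tycho_network` (the `echo` name and its behaviour are illustrative, not part of the patch):

    use bytes::Bytes;
    use tycho_network::{
        service_query_fn, BoxCloneService, InboundServiceRequest, Response, ServiceExt, Version,
    };

    // Wrap a plain async closure as a query-only service, then erase its
    // concrete type so it can be stored and cloned by the connection manager.
    fn echo() -> BoxCloneService<InboundServiceRequest<Bytes>, Response<Bytes>> {
        service_query_fn(|req: InboundServiceRequest<Bytes>| async move {
            Some(Response {
                version: Version::default(),
                body: req.body,
            })
        })
        .boxed_clone()
    }

This mirrors the `echo_service` helper in the network tests and the dispatcher wiring in the consensus crate.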
+ inner: Box::new(BoxPinFutures(inner)), + } + } +} + +impl Service for BoxCloneService +where + Request: Send + 'static, + Q: Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = BoxFuture<'static, Option>; + type OnMessageFuture = BoxFuture<'static, ()>; + type OnDatagramFuture = BoxFuture<'static, ()>; + + #[inline] + fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + self.inner.on_query(req) + } + + #[inline] + fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + self.inner.on_message(req) + } + + #[inline] + fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + self.inner.on_datagram(req) + } +} + +impl Clone for BoxCloneService +where + Q: Send + 'static, +{ + fn clone(&self) -> Self { + BoxCloneService { + inner: self.inner.clone_box(), + } + } +} + +trait CloneService: Service { + fn clone_box( + &self, + ) -> Box< + dyn CloneService< + Request, + QueryResponse = Self::QueryResponse, + OnQueryFuture = Self::OnQueryFuture, + OnMessageFuture = Self::OnMessageFuture, + OnDatagramFuture = Self::OnDatagramFuture, + > + Send, + >; +} + +impl CloneService for S +where + S: Service + Clone + Send + 'static, + S::OnQueryFuture: Send + 'static, + S::OnMessageFuture: Send + 'static, + S::OnDatagramFuture: Send + 'static, +{ + fn clone_box( + &self, + ) -> Box< + dyn CloneService< + Request, + QueryResponse = Self::QueryResponse, + OnQueryFuture = Self::OnQueryFuture, + OnMessageFuture = Self::OnMessageFuture, + OnDatagramFuture = Self::OnDatagramFuture, + > + Send, + > { + Box::new(self.clone()) + } +} + +#[repr(transparent)] +struct BoxPinFutures(S); + +impl Clone for BoxPinFutures { + #[inline] + fn clone(&self) -> Self { + BoxPinFutures(self.0.clone()) + } +} + +impl Service for BoxPinFutures +where + S: Service, +{ + type QueryResponse = S::QueryResponse; + type OnQueryFuture = BoxFuture<'static, Option>; + type OnMessageFuture = BoxFuture<'static, ()>; + type OnDatagramFuture = BoxFuture<'static, ()>; + + #[inline] + fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + Box::pin(self.0.on_query(req)) + } + + #[inline] + fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + Box::pin(self.0.on_message(req)) + } + + #[inline] + fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + Box::pin(self.0.on_datagram(req)) + } +} + +pub fn service_query_fn(f: T) -> ServiceQueryFn { + ServiceQueryFn { f } +} + +pub struct ServiceQueryFn { + f: T, +} + +impl Clone for ServiceQueryFn { + #[inline] + fn clone(&self) -> Self { + ServiceQueryFn { f: self.f.clone() } + } +} + +impl Service for ServiceQueryFn +where + Q: Send + 'static, + T: FnMut(Request) -> F + Send + 'static, + F: Future> + Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = F; + type OnMessageFuture = futures_util::future::Ready<()>; + type OnDatagramFuture = futures_util::future::Ready<()>; + + #[inline] + fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + (self.f)(req) + } + + #[inline] + fn on_message(&mut self, _req: Request) -> Self::OnMessageFuture { + futures_util::future::ready(()) + } + + #[inline] + fn on_datagram(&mut self, _req: Request) -> Self::OnDatagramFuture { + futures_util::future::ready(()) + } +} + +pub fn service_message_fn(f: T) -> ServiceMessageFn { + ServiceMessageFn { + f, + _response: PhantomData, + } +} + +impl Clone for ServiceMessageFn { + #[inline] + fn clone(&self) -> Self { + ServiceMessageFn { + f: self.f.clone(), + _response: PhantomData, + } + } +} + +pub struct 
ServiceMessageFn { + f: T, + _response: PhantomData, +} + +impl Service for ServiceMessageFn +where + Q: Send + 'static, + T: FnMut(Request) -> F + Send + 'static, + F: Future + Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = futures_util::future::Ready>; + type OnMessageFuture = F; + type OnDatagramFuture = futures_util::future::Ready<()>; + + #[inline] + fn on_query(&mut self, _req: Request) -> Self::OnQueryFuture { + futures_util::future::ready(None) + } + + #[inline] + fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + (self.f)(req) + } + + #[inline] + fn on_datagram(&mut self, _req: Request) -> Self::OnDatagramFuture { + futures_util::future::ready(()) + } +} + +pub fn service_datagram_fn(f: T) -> ServiceDatagramFn { + ServiceDatagramFn { + f, + _response: PhantomData, + } +} + +pub struct ServiceDatagramFn { + f: T, + _response: PhantomData, +} + +impl Clone for ServiceDatagramFn { + #[inline] + fn clone(&self) -> Self { + ServiceDatagramFn { + f: self.f.clone(), + _response: PhantomData, + } + } +} + +impl Service for ServiceDatagramFn +where + Q: Send + 'static, + T: FnMut(Request) -> F + Send + 'static, + F: Future + Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = futures_util::future::Ready>; + type OnMessageFuture = futures_util::future::Ready<()>; + type OnDatagramFuture = F; + + #[inline] + fn on_query(&mut self, _req: Request) -> Self::OnQueryFuture { + futures_util::future::ready(None) + } + + #[inline] + fn on_message(&mut self, _req: Request) -> Self::OnMessageFuture { + futures_util::future::ready(()) + } + + #[inline] + fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + (self.f)(req) + } +} diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 283edc6d7..000000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -1.74.0 \ No newline at end of file diff --git a/util/Cargo.toml b/util/Cargo.toml index 87ed39ac4..8e0b0f498 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -6,8 +6,10 @@ description = "Shared utilities for node components." 
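For services that need all three callbacks, the trait can also be implemented by hand, as the DHT service does earlier in this patch; the `service_*_fn` helpers only cover the single-callback cases. A rough sketch against the byte-oriented request types used throughout this patch (the `Echo` type and its behaviour are illustrative, not part of the patch):

    use bytes::Bytes;
    use futures_util::future::{ready, BoxFuture, Ready};
    use tycho_network::{InboundServiceRequest, Response, Service, Version};

    struct Echo;

    impl Service<InboundServiceRequest<Bytes>> for Echo {
        type QueryResponse = Response<Bytes>;
        type OnQueryFuture = BoxFuture<'static, Option<Self::QueryResponse>>;
        type OnMessageFuture = Ready<()>;
        type OnDatagramFuture = Ready<()>;

        fn on_query(&mut self, req: InboundServiceRequest<Bytes>) -> Self::OnQueryFuture {
            // Reply to queries by echoing the request body back to the sender.
            Box::pin(async move {
                Some(Response {
                    version: Version::default(),
                    body: req.body,
                })
            })
        }

        fn on_message(&mut self, _req: InboundServiceRequest<Bytes>) -> Self::OnMessageFuture {
            // One-way messages are ignored.
            ready(())
        }

        fn on_datagram(&mut self, _req: InboundServiceRequest<Bytes>) -> Self::OnDatagramFuture {
            // Datagrams are ignored as well.
            ready(())
        }
    }

The `Dht` implementation earlier in this patch follows the same shape, with its query branch still marked `todo!()`.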
[dependencies] # crates.io deps +ahash = "0.8" +dashmap = "5.4" # local deps [lints] -workspace= true \ No newline at end of file +workspace = true diff --git a/util/src/lib.rs b/util/src/lib.rs index 077885d7b..25dd5fe8f 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1 +1,6 @@ +use std::collections::HashMap; + pub mod time; + +pub type FastDashMap = dashmap::DashMap; +pub type FastHashMap = HashMap; From e516a61fca43e96755e6dc5ce8269d1a7b51274c Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Thu, 8 Feb 2024 22:48:46 +0100 Subject: [PATCH 12/35] network: Add service router --- Cargo.lock | 15 +++ network/Cargo.toml | 4 + network/src/dht/mod.rs | 8 +- network/src/network/mod.rs | 7 +- network/src/types/service.rs | 184 ++++++++++++++++++++--------------- network/src/util/mod.rs | 80 +-------------- network/src/util/router.rs | 106 ++++++++++++++++++++ network/src/util/traits.rs | 62 ++++++++++++ 8 files changed, 304 insertions(+), 162 deletions(-) create mode 100644 network/src/util/router.rs create mode 100644 network/src/util/traits.rs diff --git a/Cargo.lock b/Cargo.lock index 3a58eba4a..a89b3ba60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,6 +231,14 @@ dependencies = [ "serde_json", ] +[[package]] +name = "castaway" +version = "0.2.3" +source = "git+https://github.com/sagebind/castaway.git#564b11fb3394802b895f44fe42a7bba7b17df69b" +dependencies = [ + "rustversion", +] + [[package]] name = "cc" version = "1.0.83" @@ -1289,6 +1297,12 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" version = "1.0.16" @@ -1807,6 +1821,7 @@ dependencies = [ "ahash", "anyhow", "bytes", + "castaway", "dashmap", "ed25519", "everscale-crypto", diff --git a/network/Cargo.toml b/network/Cargo.toml index a35b7890e..08ecc3982 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -33,6 +33,10 @@ tokio-util = { version = "0.7", features = ["codec"] } tracing = "0.1" x509-parser = "0.15" +# NOTE: use crates.io dependency when it is released +# https://github.com/sagebind/castaway/issues/18 +castaway = { git = "https://github.com/sagebind/castaway.git" } + # local deps tycho-util = { path = "../util", version = "=0.0.1" } diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index e792d5159..0ade9ee85 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -37,25 +37,25 @@ struct DhtInner { network: WeakNetwork, } -impl Service> for Dht { +impl Service> for DhtInner { type QueryResponse = Response; type OnQueryFuture = BoxFuture<'static, Option>; type OnMessageFuture = futures_util::future::Ready<()>; type OnDatagramFuture = futures_util::future::Ready<()>; - fn on_query(&mut self, req: InboundServiceRequest) -> Self::OnQueryFuture { + fn on_query(&self, req: InboundServiceRequest) -> Self::OnQueryFuture { // TODO: parse query and dispatch to appropriate method todo!() } #[inline] - fn on_message(&mut self, req: InboundServiceRequest) -> Self::OnMessageFuture { + fn on_message(&self, req: InboundServiceRequest) -> Self::OnMessageFuture { futures_util::future::ready(()) } #[inline] - fn on_datagram(&mut self, req: InboundServiceRequest) -> Self::OnDatagramFuture { + fn on_datagram(&self, req: InboundServiceRequest) -> Self::OnDatagramFuture { futures_util::future::ready(()) } } diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index 
f8e76c46f..5495bdce4 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -68,8 +68,10 @@ impl NetworkBuilder<(T1, ())> { impl NetworkBuilder { pub fn build(self, bind_address: T, service: S) -> Result where - S: Clone + Send + 'static, - S: Service, QueryResponse = Response>, + S: Service, QueryResponse = Response> + + Send + + Clone + + 'static, { use socket2::{Domain, Protocol, Socket, Type}; @@ -147,6 +149,7 @@ impl NetworkBuilder { } #[derive(Clone)] +#[repr(transparent)] pub struct WeakNetwork(Weak); impl WeakNetwork { diff --git a/network/src/types/service.rs b/network/src/types/service.rs index 3cd6aace9..bbe7aefb0 100644 --- a/network/src/types/service.rs +++ b/network/src/types/service.rs @@ -1,9 +1,10 @@ use std::future::Future; use std::marker::PhantomData; +use std::sync::Arc; use futures_util::future::BoxFuture; -pub trait Service: Send { +pub trait Service { type QueryResponse: Send + 'static; type OnQueryFuture: Future> + Send + 'static; type OnMessageFuture: Future + Send + 'static; @@ -13,13 +14,13 @@ pub trait Service: Send { /// /// Returns a future that resolves to the either response to the query if `Some`, /// or cancellation of the query if `None`. - fn on_query(&mut self, req: Request) -> Self::OnQueryFuture; + fn on_query(&self, req: Request) -> Self::OnQueryFuture; /// Called when a message is received. - fn on_message(&mut self, req: Request) -> Self::OnMessageFuture; + fn on_message(&self, req: Request) -> Self::OnMessageFuture; /// Called when a datagram is received. - fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture; + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture; } pub trait ServiceExt: Service { @@ -48,9 +49,9 @@ pub trait ServiceExt: Service { impl ServiceExt for T where T: Service + Send + ?Sized {} -impl<'a, S, Request> Service for &'a mut S +impl<'a, S, Request> Service for &'a S where - S: Service + 'a, + S: Service + Sync + 'a, { type QueryResponse = S::QueryResponse; type OnQueryFuture = S::OnQueryFuture; @@ -58,21 +59,46 @@ where type OnDatagramFuture = S::OnDatagramFuture; #[inline] - fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + fn on_query(&self, req: Request) -> Self::OnQueryFuture { >::on_query(*self, req) } #[inline] - fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + fn on_message(&self, req: Request) -> Self::OnMessageFuture { >::on_message(*self, req) } #[inline] - fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { >::on_datagram(*self, req) } } +impl Service for Arc +where + S: Service + Sync + ?Sized, +{ + type QueryResponse = S::QueryResponse; + type OnQueryFuture = S::OnQueryFuture; + type OnMessageFuture = S::OnMessageFuture; + type OnDatagramFuture = S::OnDatagramFuture; + + #[inline] + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + >::on_query(self.as_ref(), req) + } + + #[inline] + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + >::on_message(self.as_ref(), req) + } + + #[inline] + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + >::on_datagram(self.as_ref(), req) + } +} + impl Service for Box where S: Service + ?Sized, @@ -83,34 +109,34 @@ where type OnDatagramFuture = S::OnDatagramFuture; #[inline] - fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { - >::on_query(self.as_mut(), req) + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + >::on_query(self.as_ref(), req) } #[inline] - fn 
on_message(&mut self, req: Request) -> Self::OnMessageFuture { - >::on_message(self.as_mut(), req) + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + >::on_message(self.as_ref(), req) } #[inline] - fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { - >::on_datagram(self.as_mut(), req) + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + >::on_datagram(self.as_ref(), req) } } #[repr(transparent)] pub struct BoxService { - inner: Box< - dyn Service< - Request, - QueryResponse = Q, - OnQueryFuture = BoxFuture<'static, Option>, - OnMessageFuture = BoxFuture<'static, ()>, - OnDatagramFuture = BoxFuture<'static, ()>, - > + Send, - >, + inner: Box>, } +type DynBoxService = dyn Service< + Request, + QueryResponse = Q, + OnQueryFuture = BoxFuture<'static, Option>, + OnMessageFuture = BoxFuture<'static, ()>, + OnDatagramFuture = BoxFuture<'static, ()>, + > + Send; + impl BoxService { pub fn new(inner: S) -> Self where @@ -136,34 +162,34 @@ where type OnDatagramFuture = BoxFuture<'static, ()>; #[inline] - fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + fn on_query(&self, req: Request) -> Self::OnQueryFuture { self.inner.on_query(req) } #[inline] - fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + fn on_message(&self, req: Request) -> Self::OnMessageFuture { self.inner.on_message(req) } #[inline] - fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { self.inner.on_datagram(req) } } #[repr(transparent)] pub struct BoxCloneService { - inner: Box< - dyn CloneService< - Request, - QueryResponse = Q, - OnQueryFuture = BoxFuture<'static, Option>, - OnMessageFuture = BoxFuture<'static, ()>, - OnDatagramFuture = BoxFuture<'static, ()>, - > + Send, - >, + inner: Box>, } +type DynBoxCloneService = dyn CloneService< + Request, + QueryResponse = Q, + OnQueryFuture = BoxFuture<'static, Option>, + OnMessageFuture = BoxFuture<'static, ()>, + OnDatagramFuture = BoxFuture<'static, ()>, + > + Send; + impl BoxCloneService where Q: Send + 'static, @@ -192,17 +218,17 @@ where type OnDatagramFuture = BoxFuture<'static, ()>; #[inline] - fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + fn on_query(&self, req: Request) -> Self::OnQueryFuture { self.inner.on_query(req) } #[inline] - fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + fn on_message(&self, req: Request) -> Self::OnMessageFuture { self.inner.on_message(req) } #[inline] - fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { self.inner.on_datagram(req) } } @@ -219,17 +245,7 @@ where } trait CloneService: Service { - fn clone_box( - &self, - ) -> Box< - dyn CloneService< - Request, - QueryResponse = Self::QueryResponse, - OnQueryFuture = Self::OnQueryFuture, - OnMessageFuture = Self::OnMessageFuture, - OnDatagramFuture = Self::OnDatagramFuture, - > + Send, - >; + fn clone_box(&self) -> Box>; } impl CloneService for S @@ -239,21 +255,19 @@ where S::OnMessageFuture: Send + 'static, S::OnDatagramFuture: Send + 'static, { - fn clone_box( - &self, - ) -> Box< - dyn CloneService< - Request, - QueryResponse = Self::QueryResponse, - OnQueryFuture = Self::OnQueryFuture, - OnMessageFuture = Self::OnMessageFuture, - OnDatagramFuture = Self::OnDatagramFuture, - > + Send, - > { + fn clone_box(&self) -> Box> { Box::new(self.clone()) } } +type DynCloneService = dyn CloneService< + Request, + QueryResponse = 
>::QueryResponse, + OnQueryFuture = >::OnQueryFuture, + OnMessageFuture = >::OnMessageFuture, + OnDatagramFuture = >::OnDatagramFuture, + > + Send; + #[repr(transparent)] struct BoxPinFutures(S); @@ -274,18 +288,30 @@ where type OnDatagramFuture = BoxFuture<'static, ()>; #[inline] - fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { - Box::pin(self.0.on_query(req)) + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + let f = self.0.on_query(req); + match castaway::cast!(f, Self::OnQueryFuture) { + Ok(f) => f, + Err(f) => Box::pin(f), + } } #[inline] - fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { - Box::pin(self.0.on_message(req)) + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + let f = self.0.on_message(req); + match castaway::cast!(f, Self::OnMessageFuture) { + Ok(f) => f, + Err(f) => Box::pin(f), + } } #[inline] - fn on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { - Box::pin(self.0.on_datagram(req)) + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + let f = self.0.on_datagram(req); + match castaway::cast!(f, Self::OnDatagramFuture) { + Ok(f) => f, + Err(f) => Box::pin(f), + } } } @@ -307,7 +333,7 @@ impl Clone for ServiceQueryFn { impl Service for ServiceQueryFn where Q: Send + 'static, - T: FnMut(Request) -> F + Send + 'static, + T: Fn(Request) -> F + Send + 'static, F: Future> + Send + 'static, { type QueryResponse = Q; @@ -316,17 +342,17 @@ where type OnDatagramFuture = futures_util::future::Ready<()>; #[inline] - fn on_query(&mut self, req: Request) -> Self::OnQueryFuture { + fn on_query(&self, req: Request) -> Self::OnQueryFuture { (self.f)(req) } #[inline] - fn on_message(&mut self, _req: Request) -> Self::OnMessageFuture { + fn on_message(&self, _req: Request) -> Self::OnMessageFuture { futures_util::future::ready(()) } #[inline] - fn on_datagram(&mut self, _req: Request) -> Self::OnDatagramFuture { + fn on_datagram(&self, _req: Request) -> Self::OnDatagramFuture { futures_util::future::ready(()) } } @@ -356,7 +382,7 @@ pub struct ServiceMessageFn { impl Service for ServiceMessageFn where Q: Send + 'static, - T: FnMut(Request) -> F + Send + 'static, + T: Fn(Request) -> F + Send + 'static, F: Future + Send + 'static, { type QueryResponse = Q; @@ -365,17 +391,17 @@ where type OnDatagramFuture = futures_util::future::Ready<()>; #[inline] - fn on_query(&mut self, _req: Request) -> Self::OnQueryFuture { + fn on_query(&self, _req: Request) -> Self::OnQueryFuture { futures_util::future::ready(None) } #[inline] - fn on_message(&mut self, req: Request) -> Self::OnMessageFuture { + fn on_message(&self, req: Request) -> Self::OnMessageFuture { (self.f)(req) } #[inline] - fn on_datagram(&mut self, _req: Request) -> Self::OnDatagramFuture { + fn on_datagram(&self, _req: Request) -> Self::OnDatagramFuture { futures_util::future::ready(()) } } @@ -405,7 +431,7 @@ impl Clone for ServiceDatagramFn { impl Service for ServiceDatagramFn where Q: Send + 'static, - T: FnMut(Request) -> F + Send + 'static, + T: Fn(Request) -> F + Send + 'static, F: Future + Send + 'static, { type QueryResponse = Q; @@ -414,17 +440,17 @@ where type OnDatagramFuture = F; #[inline] - fn on_query(&mut self, _req: Request) -> Self::OnQueryFuture { + fn on_query(&self, _req: Request) -> Self::OnQueryFuture { futures_util::future::ready(None) } #[inline] - fn on_message(&mut self, _req: Request) -> Self::OnMessageFuture { + fn on_message(&self, _req: Request) -> Self::OnMessageFuture { futures_util::future::ready(()) } #[inline] - fn 
on_datagram(&mut self, req: Request) -> Self::OnDatagramFuture { + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { (self.f)(req) } } diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs index 00cbfb26d..ca32fba08 100644 --- a/network/src/util/mod.rs +++ b/network/src/util/mod.rs @@ -1,78 +1,4 @@ -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; +pub use self::traits::NetworkExt; -use anyhow::Result; -use bytes::Bytes; -use futures_util::FutureExt; - -use crate::network::Network; -use crate::types::{PeerEvent, PeerId, Request, Response}; - -pub trait NetworkExt { - fn query(&self, peer_id: &PeerId, request: Request) -> Query; -} - -impl NetworkExt for Network { - fn query(&self, peer_id: &PeerId, request: Request) -> Query { - use tokio::sync::broadcast::error::RecvError; - - let network = self.clone(); - let peer_id = *peer_id; - Query(Box::pin(async move { - let mut peer_events = network.subscribe()?; - - // Make query if already connected - if let Some(peer) = network.peer(&peer_id) { - return peer.rpc(request).await; - } - - match network.known_peers().get(&peer_id) { - // Initiate a connection of it is a known peer - Some(peer_info) => { - network - .connect_with_peer_id(peer_info.address, &peer_id) - .await?; - } - // Error otherwise - None => anyhow::bail!("trying to query an unknown peer: {peer_id}"), - } - - loop { - match peer_events.recv().await { - Ok(PeerEvent::NewPeer(peer_id)) if peer_id == peer_id => { - if let Some(peer) = network.peer(&peer_id) { - return peer.rpc(request).await; - } - } - Ok(_) => {} - Err(RecvError::Closed) => anyhow::bail!("network subscription closed"), - Err(RecvError::Lagged(_)) => { - peer_events = peer_events.resubscribe(); - - if let Some(peer) = network.peer(&peer_id) { - return peer.rpc(request).await; - } - } - } - - anyhow::ensure!( - network.known_peers().contains(&peer_id), - "waiting for a connection to an unknown peer: {peer_id}", - ); - } - })) - } -} - -// TODO: replace with RPITIT -pub struct Query(Pin>> + Send + 'static>>); - -impl Future for Query { - type Output = Result>; - - #[inline] - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.0.poll_unpin(cx) - } -} +mod router; +mod traits; diff --git a/network/src/util/router.rs b/network/src/util/router.rs new file mode 100644 index 000000000..91be5c02b --- /dev/null +++ b/network/src/util/router.rs @@ -0,0 +1,106 @@ +use std::marker::PhantomData; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use futures_util::future::BoxFuture; +use futures_util::{Future, FutureExt}; +use tycho_util::FastHashMap; + +use crate::types::{BoxService, Service}; + +pub struct Router { + inner: Arc>, +} + +impl Service for Router +where + Request: Send + AsRef<[u8]> + 'static, + Q: Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = BoxFutureOrNoop>; + type OnMessageFuture = BoxFutureOrNoop<()>; + type OnDatagramFuture = BoxFutureOrNoop<()>; + + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + match find_handler(&req, &self.inner.query_handlers, &self.inner.services) { + Some(service) => BoxFutureOrNoop::Boxed(service.on_query(req)), + None => BoxFutureOrNoop::Noop, + } + } + + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + match find_handler(&req, &self.inner.message_handlers, &self.inner.services) { + Some(service) => BoxFutureOrNoop::Boxed(service.on_message(req)), + None => BoxFutureOrNoop::Noop, + } + } + + fn on_datagram(&self, req: Request) -> 
Self::OnDatagramFuture { + match find_handler(&req, &self.inner.datagram_handlers, &self.inner.services) { + Some(service) => BoxFutureOrNoop::Boxed(service.on_datagram(req)), + None => BoxFutureOrNoop::Noop, + } + } +} + +fn find_handler<'a, T: AsRef<[u8]>, S>( + req: &T, + indices: &FastHashMap, + handlers: &'a [S], +) -> Option<&'a S> { + if let Some(id) = read_le_u32(req.as_ref()) { + if let Some(&index) = indices.get(&id) { + // NOTE: intentionally panics if index is out of bounds as it is + // an implementation error. + return Some(handlers.get(index).expect("index must be in bounds")); + } + } + None +} + +pub enum BoxFutureOrNoop { + Boxed(BoxFuture<'static, T>), + Noop, +} + +impl Future for BoxFutureOrNoop<()> { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.get_mut() { + BoxFutureOrNoop::Boxed(fut) => fut.poll_unpin(cx), + BoxFutureOrNoop::Noop => std::task::Poll::Ready(()), + } + } +} + +impl Future for BoxFutureOrNoop> { + type Output = Option; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.get_mut() { + BoxFutureOrNoop::Boxed(fut) => fut.poll_unpin(cx), + BoxFutureOrNoop::Noop => std::task::Poll::Ready(None), + } + } +} + +struct Inner { + services: Vec>, + query_handlers: FastHashMap, + message_handlers: FastHashMap, + datagram_handlers: FastHashMap, + _response: PhantomData, +} + +fn read_le_u32(buf: &[u8]) -> Option { + if buf.len() >= 4 { + let mut bytes = [0; 4]; + bytes.copy_from_slice(&buf[..4]); + Some(u32::from_le_bytes(bytes)) + } else { + None + } +} diff --git a/network/src/util/traits.rs b/network/src/util/traits.rs new file mode 100644 index 000000000..c716dd069 --- /dev/null +++ b/network/src/util/traits.rs @@ -0,0 +1,62 @@ +use std::future::Future; + +use anyhow::Result; +use bytes::Bytes; + +use crate::network::Network; +use crate::types::{PeerEvent, PeerId, Request, Response}; + +pub trait NetworkExt { + fn query( + &self, + peer_id: &PeerId, + request: Request, + ) -> impl Future>> + Send; +} + +impl NetworkExt for Network { + async fn query(&self, peer_id: &PeerId, request: Request) -> Result> { + use tokio::sync::broadcast::error::RecvError; + + let mut peer_events = self.subscribe()?; + + // Make query if already connected + if let Some(peer) = self.peer(peer_id) { + return peer.rpc(request).await; + } + + match self.known_peers().get(peer_id) { + // Initiate a connection of it is a known peer + Some(peer_info) => { + self.connect_with_peer_id(peer_info.address, peer_id) + .await?; + } + // Error otherwise + None => anyhow::bail!("trying to query an unknown peer: {peer_id}"), + } + + loop { + match peer_events.recv().await { + Ok(PeerEvent::NewPeer(new_peer_id)) if &new_peer_id == peer_id => { + if let Some(peer) = self.peer(peer_id) { + return peer.rpc(request).await; + } + } + Ok(_) => {} + Err(RecvError::Closed) => anyhow::bail!("network subscription closed"), + Err(RecvError::Lagged(_)) => { + peer_events = peer_events.resubscribe(); + + if let Some(peer) = self.peer(peer_id) { + return peer.rpc(request).await; + } + } + } + + anyhow::ensure!( + self.known_peers().contains(peer_id), + "waiting for a connection to an unknown peer: {peer_id}", + ); + } + } +} From 7b75d6e5039776bdef32efb5a4fa866aabb407e9 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Fri, 9 Feb 2024 13:59:05 +0100 Subject: [PATCH 13/35] network: Add dht builder --- consensus/src/intercom/dispatcher.rs | 5 +- network/src/dht/mod.rs | 215 ++++++++++++++++++++++++--- network/src/lib.rs | 6 +- 
network/src/util/futures.rs | 45 ++++++ network/src/util/mod.rs | 3 + network/src/util/router.rs | 114 ++++++++++---- 6 files changed, 329 insertions(+), 59 deletions(-) create mode 100644 network/src/util/futures.rs diff --git a/consensus/src/intercom/dispatcher.rs b/consensus/src/intercom/dispatcher.rs index 775065f47..4c34ea84e 100644 --- a/consensus/src/intercom/dispatcher.rs +++ b/consensus/src/intercom/dispatcher.rs @@ -5,8 +5,9 @@ use anyhow::{anyhow, Result}; use bytes::Bytes; use serde::{Deserialize, Serialize}; -use tycho_network::util::NetworkExt; -use tycho_network::{service_query_fn, Config, InboundServiceRequest, Network, Response, Version}; +use tycho_network::{ + service_query_fn, Config, InboundServiceRequest, Network, NetworkExt, Response, Version, +}; use crate::intercom::responses::*; use crate::models::{Location, Point, PointId, RoundId, Signature}; diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 0ade9ee85..467807742 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -2,21 +2,162 @@ use std::sync::{Arc, Mutex}; use std::time::Instant; use anyhow::Result; -use bytes::Bytes; -use futures_util::future::BoxFuture; +use bytes::{Buf, Bytes}; +use futures_util::Future; -use self::routing::RoutingTable; -use self::storage::Storage; -use crate::network::WeakNetwork; +use self::routing::{RoutingTable, RoutingTableBuilder}; +use self::storage::{Storage, StorageBuilder}; use crate::types::{PeerId, Response, Service}; -use crate::{proto, InboundServiceRequest}; +use crate::util::{BoxFutureOrNoop, Routable}; +use crate::{proto, InboundServiceRequest, Network}; mod routing; mod storage; -pub struct Dht(Arc); +pub struct DhtBuilder { + mandatory_fields: MandatoryFields, + local_id: PeerId, +} + +impl DhtBuilder { + pub fn build(self) -> (DhtClientBuilder, DhtService) { + let (routing_table, storage) = self.mandatory_fields; + + let inner = Arc::new(DhtInner { + local_id: self.local_id, + routing_table: Mutex::new(routing_table), + last_table_refresh: Instant::now(), + storage, + }); + + (DhtClientBuilder(inner.clone()), DhtService(inner)) + } +} + +impl DhtBuilder<(T1, ())> { + pub fn with_storage(self, f: F) -> DhtBuilder<(T1, Storage)> + where + F: FnOnce(StorageBuilder) -> StorageBuilder, + { + let (routing_table, _) = self.mandatory_fields; + let storage = f(Storage::builder()).build(); + DhtBuilder { + mandatory_fields: (routing_table, storage), + local_id: self.local_id, + } + } +} + +impl DhtBuilder<((), T2)> { + pub fn with_routing_table(self, f: F) -> DhtBuilder<(RoutingTable, T2)> + where + F: FnOnce(RoutingTableBuilder) -> RoutingTableBuilder, + { + let routing_table = f(RoutingTable::builder(self.local_id)).build(); + let (_, storage) = self.mandatory_fields; + DhtBuilder { + mandatory_fields: (routing_table, storage), + local_id: self.local_id, + } + } +} + +pub struct DhtClientBuilder(Arc); + +impl DhtClientBuilder { + pub fn build(self, network: Network) -> DhtClient { + // TODO: spawn background tasks here + + DhtClient { + inner: self.0, + network, + } + } +} + +pub struct DhtService(Arc); + +impl DhtService { + pub fn builder(local_id: PeerId) -> DhtBuilder<((), ())> { + DhtBuilder { + mandatory_fields: ((), ()), + local_id, + } + } + + pub fn make_client(&self, network: Network) -> DhtClient { + DhtClient { + inner: self.0.clone(), + network, + } + } +} + +macro_rules! match_req { + ($req:ident, { + $($ty:path as $pat:pat => $expr:expr),*$(,)? 
+ }) => {{ + let e = if $req.body.len() >= 4 { + match $req.body.as_ref().get_u32_le() { + $( + <$ty>::TL_ID => match tl_proto::deserialize::<$ty>(&$req.body) { + Ok($pat) => return ($expr).boxed_or_noop(), + Err(e) => e, + } + )* + _ => tl_proto::TlError::UnknownConstructor, + } + } else { + tl_proto::TlError::UnexpectedEof + }; + tracing::debug!("failed to deserialize request: {e:?}"); + BoxFutureOrNoop::Noop + }}; +} + +impl Service> for DhtService { + type QueryResponse = Response; + type OnQueryFuture = BoxFutureOrNoop>; + type OnMessageFuture = futures_util::future::Ready<()>; + type OnDatagramFuture = futures_util::future::Ready<()>; -impl Dht { + fn on_query(&self, req: InboundServiceRequest) -> Self::OnQueryFuture { + match_req!(req, { + proto::dht::rpc::Store as req => self.0.clone().store(req), + proto::dht::rpc::FindNode as req => self.0.clone().find_node(req), + proto::dht::rpc::FindValue as req => self.0.clone().find_value(req), + proto::dht::rpc::GetNodeInfo as req => self.0.clone().get_node_info(req), + }) + } + + #[inline] + fn on_message(&self, _req: InboundServiceRequest) -> Self::OnMessageFuture { + futures_util::future::ready(()) + } + + #[inline] + fn on_datagram(&self, _req: InboundServiceRequest) -> Self::OnDatagramFuture { + futures_util::future::ready(()) + } +} + +impl Routable for DhtService { + fn query_ids(&self) -> impl IntoIterator { + [ + proto::dht::rpc::Store::TL_ID, + proto::dht::rpc::FindNode::TL_ID, + proto::dht::rpc::FindValue::TL_ID, + proto::dht::rpc::GetNodeInfo::TL_ID, + ] + } +} + +pub struct DhtClient { + inner: Arc, + network: Network, +} + +impl DhtClient { pub async fn find_peers(&self, key: &PeerId) -> Result> { todo!() } @@ -34,28 +175,58 @@ struct DhtInner { routing_table: Mutex, last_table_refresh: Instant, storage: Storage, - network: WeakNetwork, } -impl Service> for DhtInner { - type QueryResponse = Response; - type OnQueryFuture = BoxFuture<'static, Option>; - type OnMessageFuture = futures_util::future::Ready<()>; - type OnDatagramFuture = futures_util::future::Ready<()>; +impl DhtInner { + async fn store(self: Arc, req: proto::dht::rpc::Store) -> Result { + todo!() + } - fn on_query(&self, req: InboundServiceRequest) -> Self::OnQueryFuture { - // TODO: parse query and dispatch to appropriate method + async fn find_node( + self: Arc, + req: proto::dht::rpc::FindNode, + ) -> Result { + todo!() + } + async fn find_value( + self: Arc, + req: proto::dht::rpc::FindValue, + ) -> Result { todo!() } - #[inline] - fn on_message(&self, req: InboundServiceRequest) -> Self::OnMessageFuture { - futures_util::future::ready(()) + async fn get_node_info( + self: Arc, + req: proto::dht::rpc::GetNodeInfo, + ) -> Result { + todo!() } +} - #[inline] - fn on_datagram(&self, req: InboundServiceRequest) -> Self::OnDatagramFuture { - futures_util::future::ready(()) +trait HandlerFuture { + fn boxed_or_noop(self) -> BoxFutureOrNoop; +} + +impl HandlerFuture>> for F +where + F: Future> + Send + 'static, + T: tl_proto::TlWrite, + E: std::fmt::Debug, +{ + fn boxed_or_noop(self) -> BoxFutureOrNoop>> { + BoxFutureOrNoop::Boxed(Box::pin(async move { + match self.await { + Ok(res) => Some(Response { + version: Default::default(), + body: Bytes::from(tl_proto::serialize(&res)), + }), + Err(e) => { + tracing::debug!("failed to handle request: {e:?}"); + // TODO: return error response here? 
+ None + } + } + })) } } diff --git a/network/src/lib.rs b/network/src/lib.rs index 5eb184862..d60601305 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,5 +1,6 @@ +pub use self::util::{NetworkExt, Routable, Router, RouterBuilder}; pub use config::{Config, QuicConfig}; -pub use dht::Dht; +pub use dht::{DhtClient, DhtService, DhtBuilder}; pub use network::{Network, NetworkBuilder, Peer, WeakNetwork}; pub use types::{ service_datagram_fn, service_message_fn, service_query_fn, Address, AddressList, @@ -15,8 +16,7 @@ mod dht; mod endpoint; mod network; mod types; - -pub mod util; +mod util; pub mod proto { pub mod dht; diff --git a/network/src/util/futures.rs b/network/src/util/futures.rs new file mode 100644 index 000000000..66fd9c5fa --- /dev/null +++ b/network/src/util/futures.rs @@ -0,0 +1,45 @@ +use std::pin::Pin; +use std::task::{Context, Poll}; + +use futures_util::future::BoxFuture; +use futures_util::{Future, FutureExt}; + +pub enum BoxFutureOrNoop { + Boxed(BoxFuture<'static, T>), + Noop, +} + +impl BoxFutureOrNoop { + #[inline] + pub fn future(f: F) -> Self + where + F: Future + Send + 'static, + { + match castaway::cast!(f, BoxFuture<'static, T>) { + Ok(f) => BoxFutureOrNoop::Boxed(f), + Err(f) => BoxFutureOrNoop::Boxed(f.boxed()), + } + } +} + +impl Future for BoxFutureOrNoop<()> { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.get_mut() { + BoxFutureOrNoop::Boxed(fut) => fut.poll_unpin(cx), + BoxFutureOrNoop::Noop => std::task::Poll::Ready(()), + } + } +} + +impl Future for BoxFutureOrNoop> { + type Output = Option; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.get_mut() { + BoxFutureOrNoop::Boxed(fut) => fut.poll_unpin(cx), + BoxFutureOrNoop::Noop => std::task::Poll::Ready(None), + } + } +} diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs index ca32fba08..7871938d7 100644 --- a/network/src/util/mod.rs +++ b/network/src/util/mod.rs @@ -1,4 +1,7 @@ +pub use self::router::{Routable, Router, RouterBuilder}; pub use self::traits::NetworkExt; +pub use self::futures::BoxFutureOrNoop; mod router; mod traits; +mod futures; diff --git a/network/src/util/router.rs b/network/src/util/router.rs index 91be5c02b..f83a4bd97 100644 --- a/network/src/util/router.rs +++ b/network/src/util/router.rs @@ -1,18 +1,95 @@ use std::marker::PhantomData; -use std::pin::Pin; use std::sync::Arc; -use std::task::{Context, Poll}; -use futures_util::future::BoxFuture; -use futures_util::{Future, FutureExt}; use tycho_util::FastHashMap; -use crate::types::{BoxService, Service}; +use crate::types::{BoxService, Service, ServiceExt}; +use crate::util::BoxFutureOrNoop; + +pub trait Routable { + #[inline] + fn query_ids(&self) -> impl IntoIterator { + std::iter::empty() + } + + #[inline] + fn message_ids(&self) -> impl IntoIterator { + std::iter::empty() + } + + #[inline] + fn datagram_ids(&self) -> impl IntoIterator { + std::iter::empty() + } +} + +pub struct RouterBuilder { + inner: Inner, +} + +impl RouterBuilder { + pub fn route(mut self, service: S) -> Self + where + S: Service + Routable + Send + 'static, + { + let index = self.inner.services.len(); + for id in service.query_ids() { + let prev = self.inner.query_handlers.insert(id, index); + assert!(prev.is_none(), "duplicate query id: {:08x}", id); + } + for id in service.message_ids() { + let prev = self.inner.message_handlers.insert(id, index); + assert!(prev.is_none(), "duplicate message id: {:08x}", id); + } + for id in service.datagram_ids() { 
+ let prev = self.inner.datagram_handlers.insert(id, index); + assert!(prev.is_none(), "duplicate datagram id: {:08x}", id); + } + + self.inner.services.push(service.boxed()); + self + } + + pub fn build(self) -> Router { + Router { + inner: Arc::new(self.inner), + } + } +} + +impl Default for RouterBuilder { + fn default() -> Self { + Self { + inner: Inner { + services: Vec::new(), + query_handlers: FastHashMap::default(), + message_handlers: FastHashMap::default(), + datagram_handlers: FastHashMap::default(), + _response: PhantomData, + }, + } + } +} pub struct Router { inner: Arc>, } +impl Router { + pub fn builder() -> RouterBuilder { + RouterBuilder::default() + } +} + +impl Clone for Router { + #[inline] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + impl Service for Router where Request: Send + AsRef<[u8]> + 'static, @@ -60,33 +137,6 @@ fn find_handler<'a, T: AsRef<[u8]>, S>( None } -pub enum BoxFutureOrNoop { - Boxed(BoxFuture<'static, T>), - Noop, -} - -impl Future for BoxFutureOrNoop<()> { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.get_mut() { - BoxFutureOrNoop::Boxed(fut) => fut.poll_unpin(cx), - BoxFutureOrNoop::Noop => std::task::Poll::Ready(()), - } - } -} - -impl Future for BoxFutureOrNoop> { - type Output = Option; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.get_mut() { - BoxFutureOrNoop::Boxed(fut) => fut.poll_unpin(cx), - BoxFutureOrNoop::Noop => std::task::Poll::Ready(None), - } - } -} - struct Inner { services: Vec>, query_handlers: FastHashMap, From 909f35f3cb494d69545aa0459b4b7a12c49a399b Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Fri, 9 Feb 2024 16:52:18 +0100 Subject: [PATCH 14/35] network: Impl dht service --- network/src/dht/mod.rs | 68 +++++++++++++++++++++++++++-------- network/src/dht/routing.rs | 73 ++++++++++++++++++++++++++------------ network/src/dht/storage.rs | 5 +++ network/src/network/mod.rs | 13 +++++++ 4 files changed, 122 insertions(+), 37 deletions(-) diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 467807742..4d137c5db 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -4,6 +4,7 @@ use std::time::Instant; use anyhow::Result; use bytes::{Buf, Bytes}; use futures_util::Future; +use tl_proto::TlWrite; use self::routing::{RoutingTable, RoutingTableBuilder}; use self::storage::{Storage, StorageBuilder}; @@ -28,6 +29,7 @@ impl DhtBuilder { routing_table: Mutex::new(routing_table), last_table_refresh: Instant::now(), storage, + node_info: Mutex::new(None), }); (DhtClientBuilder(inner.clone()), DhtService(inner)) @@ -66,7 +68,9 @@ pub struct DhtClientBuilder(Arc); impl DhtClientBuilder { pub fn build(self, network: Network) -> DhtClient { - // TODO: spawn background tasks here + // TODO: spawn background tasks here: + // - refresh routing table + // - update and broadcast node info DhtClient { inner: self.0, @@ -126,7 +130,7 @@ impl Service> for DhtService { proto::dht::rpc::Store as req => self.0.clone().store(req), proto::dht::rpc::FindNode as req => self.0.clone().find_node(req), proto::dht::rpc::FindValue as req => self.0.clone().find_value(req), - proto::dht::rpc::GetNodeInfo as req => self.0.clone().get_node_info(req), + proto::dht::rpc::GetNodeInfo as _ => self.0.clone().get_node_info(), }) } @@ -175,32 +179,50 @@ struct DhtInner { routing_table: Mutex, last_table_refresh: Instant, storage: Storage, + node_info: Mutex>, } impl DhtInner { async fn store(self: Arc, req: 
proto::dht::rpc::Store) -> Result { - todo!() + self.storage.insert(&req.value)?; + Ok(proto::dht::Stored) } - async fn find_node( - self: Arc, - req: proto::dht::rpc::FindNode, - ) -> Result { - todo!() + async fn find_node(self: Arc, req: proto::dht::rpc::FindNode) -> Result { + let nodes = self + .routing_table + .lock() + .unwrap() + .closest(&req.key, req.k as usize); + + Ok(NodeResponseRaw { nodes }) } async fn find_value( self: Arc, req: proto::dht::rpc::FindValue, - ) -> Result { - todo!() + ) -> Result { + match self.storage.get(&req.key) { + Some(value) => Ok(ValueResponseRaw::Found(value)), + None => { + let nodes = self + .routing_table + .lock() + .unwrap() + .closest(&req.key, req.k as usize); + + Ok(ValueResponseRaw::NotFound( + nodes.into_iter().map(|node| node.into()).collect(), + )) + } + } } - async fn get_node_info( - self: Arc, - req: proto::dht::rpc::GetNodeInfo, - ) -> Result { - todo!() + async fn get_node_info(self: Arc) -> Result { + match self.node_info.lock().unwrap().as_ref() { + Some(info) => Ok(proto::dht::NodeInfoResponse { info: info.clone() }), + None => Err(anyhow::anyhow!("node info not available")), + } } } @@ -230,3 +252,19 @@ where })) } } + +#[derive(Debug, Clone, TlWrite)] +#[tl(boxed, scheme = "proto.tl")] +enum ValueResponseRaw { + #[tl(id = "dht.valueFound")] + Found(Bytes), + #[tl(id = "dht.valueNotFound")] + NotFound(Vec>), +} + +#[derive(Debug, Clone, TlWrite)] +#[tl(boxed, id = "dht.nodesFound", scheme = "proto.tl")] +pub struct NodeResponseRaw { + /// List of nodes closest to the key. + pub nodes: Vec>, +} diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index cce7544e3..f45a23c1b 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -1,6 +1,8 @@ use std::collections::{BTreeMap, VecDeque}; +use std::sync::Arc; use std::time::{Duration, Instant}; +use crate::proto::dht; use crate::types::PeerId; pub struct RoutingTableBuilder { @@ -41,8 +43,8 @@ impl RoutingTable { } } - pub fn add(&mut self, key: &PeerId) -> bool { - let distance = distance(&self.local_id, key); + pub fn add(&mut self, node: Arc) -> bool { + let distance = distance(&self.local_id, &node.id); if distance == 0 { return false; } @@ -50,7 +52,7 @@ impl RoutingTable { self.buckets .entry(distance) .or_insert_with(|| Bucket::with_capacity(self.max_k)) - .insert(key, self.max_k, &self.node_timeout) + .insert(node, self.max_k, &self.node_timeout) } pub fn remove(&mut self, key: &PeerId) -> bool { @@ -62,7 +64,7 @@ impl RoutingTable { } } - pub fn closest(&self, key: &PeerId, count: usize) -> Vec { + pub fn closest(&self, key: &[u8; 32], count: usize) -> Vec> { let count = count.min(self.max_k); if count == 0 { return Vec::new(); @@ -70,7 +72,7 @@ impl RoutingTable { // TODO: fill secure and unsecure buckets in parallel let mut result = Vec::with_capacity(count); - let distance = distance(&self.local_id, key); + let distance = distance(&self.local_id, PeerId::wrap(key)); // Search for closest nodes first for i in (distance..=MAX_DISTANCE).chain((0..distance).rev()) { @@ -81,7 +83,7 @@ impl RoutingTable { if let Some(bucket) = self.buckets.get(&i) { for node in bucket.nodes.iter().take(remaining) { - result.push(node.id); + result.push(node.data.clone()); } } } @@ -117,8 +119,12 @@ impl Bucket { } } - fn insert(&mut self, key: &PeerId, max_k: usize, timeout: &Duration) -> bool { - if let Some(index) = self.nodes.iter_mut().position(|node| &node.id == key) { + fn insert(&mut self, node: Arc, max_k: usize, timeout: &Duration) -> bool { + if let 
Some(index) = self + .nodes + .iter_mut() + .position(|item| item.data.id == node.id) + { self.nodes.remove(index); } else if self.nodes.len() >= max_k { if matches!(self.nodes.front(), Some(node) if node.is_expired(timeout)) { @@ -128,12 +134,12 @@ impl Bucket { } } - self.nodes.push_back(Node::new(key)); + self.nodes.push_back(Node::new(node)); true } fn remove(&mut self, key: &PeerId) -> bool { - if let Some(index) = self.nodes.iter().position(|node| &node.id == key) { + if let Some(index) = self.nodes.iter().position(|node| &node.data.id == key) { self.nodes.remove(index); true } else { @@ -142,7 +148,7 @@ impl Bucket { } fn contains(&self, key: &PeerId) -> bool { - self.nodes.iter().any(|node| &node.id == key) + self.nodes.iter().any(|node| &node.data.id == key) } fn is_empty(&self) -> bool { @@ -151,14 +157,14 @@ impl Bucket { } struct Node { - id: PeerId, + data: Arc, last_updated_at: Instant, } impl Node { - fn new(peer_id: &PeerId) -> Self { + fn new(data: Arc) -> Self { Self { - id: *peer_id, + data, last_updated_at: Instant::now(), } } @@ -187,15 +193,30 @@ const MAX_DISTANCE: usize = 256; mod tests { use std::str::FromStr; + use crate::AddressList; + use super::*; + fn make_node(id: PeerId) -> Arc { + Arc::new(dht::NodeInfo { + id, + address_list: AddressList { + items: Default::default(), + created_at: 0, + expires_at: 0, + }, + created_at: 0, + signature: Default::default(), + }) + } + #[test] fn buckets_are_sets() { let mut table = RoutingTable::builder(PeerId::random()).build(); let peer = PeerId::random(); - assert!(table.add(&peer)); - assert!(table.add(&peer)); // returns true because the node was updated + assert!(table.add(make_node(peer))); + assert!(table.add(make_node(peer))); // returns true because the node was updated assert_eq!(table.len(), 1); } @@ -204,7 +225,7 @@ mod tests { let local_id = PeerId::random(); let mut table = RoutingTable::builder(local_id).build(); - assert!(!table.add(&local_id)); + assert!(!table.add(make_node(local_id))); assert!(table.is_empty()); } @@ -215,9 +236,9 @@ mod tests { let mut bucket = Bucket::with_capacity(k); for _ in 0..k { - assert!(bucket.insert(&PeerId::random(), k, &timeout)); + assert!(bucket.insert(make_node(PeerId::random()), k, &timeout)); } - assert!(!bucket.insert(&PeerId::random(), k, &timeout)); + assert!(!bucket.insert(make_node(PeerId::random()), k, &timeout)); } #[test] @@ -336,7 +357,7 @@ mod tests { let mut table = RoutingTable::builder(local_id).build(); for id in ids { - table.add(&id); + table.add(make_node(id)); } { @@ -368,7 +389,11 @@ mod tests { .collect::, _>>() .unwrap(); - let mut closest = table.closest(&local_id, 20); + let mut closest = table + .closest(local_id.as_bytes(), 20) + .into_iter() + .map(|item| item.id) + .collect::>(); closest.sort(); assert_eq!(closest, expected_closest_ids); } @@ -407,7 +432,11 @@ mod tests { ) .unwrap(); - let mut closest = table.closest(&target, 20); + let mut closest = table + .closest(target.as_bytes(), 20) + .into_iter() + .map(|item| item.id) + .collect::>(); closest.sort(); assert_eq!(closest, expected_closest_ids); } diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index 3a3ecab55..b3249554c 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs @@ -124,6 +124,11 @@ impl Storage { StorageBuilder::default() } + pub fn get(&self, key: &[u8; 32]) -> Option { + let stored_value = self.cache.get(key)?; + (stored_value.expires_at > now_sec()).then(|| stored_value.data) + } + pub fn insert(&self, value: &proto::dht::Value) -> 
Result { match value.expires_at().checked_sub(now_sec()) { Some(0) | None => return Err(StorageError::ValueExpired), diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index 5495bdce4..229053809 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -3,6 +3,7 @@ use std::sync::{Arc, Weak}; use anyhow::Result; use bytes::Bytes; +use everscale_crypto::ed25519; use rand::Rng; use tokio::sync::{broadcast, mpsc, oneshot}; @@ -79,6 +80,8 @@ impl NetworkBuilder { let quic_config = config.quic.clone().unwrap_or_default(); let (service_name, private_key) = self.mandatory_fields; + let keypair = ed25519::KeyPair::from(&ed25519::SecretKey::from_bytes(private_key)); + let endpoint_config = EndpointConfig::builder() .with_service_name(service_name) .with_private_key(private_key) @@ -141,6 +144,7 @@ impl NetworkBuilder { active_peers: weak_active_peers, known_peers, connection_manager_handle, + keypair, } }); @@ -219,6 +223,14 @@ impl Network { pub fn is_closed(&self) -> bool { self.0.is_closed() } + + pub fn sign_tl(&self, data: T) -> [u8; 64] { + self.0.keypair.sign(data) + } + + pub fn sign_raw(&self, data: &[u8]) -> [u8; 64] { + self.0.keypair.sign_raw(data) + } } struct NetworkInner { @@ -227,6 +239,7 @@ struct NetworkInner { active_peers: WeakActivePeers, known_peers: KnownPeers, connection_manager_handle: mpsc::Sender, + keypair: ed25519::KeyPair, } impl NetworkInner { From b09ea2ee6f8d8efa31865d63cd3cd1c032e382a4 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Fri, 9 Feb 2024 20:50:37 +0100 Subject: [PATCH 15/35] network: Fix service trait bounds --- network/src/dht/mod.rs | 334 ++++++++++++++++++++--------------- network/src/dht/routing.rs | 2 +- network/src/dht/storage.rs | 2 +- network/src/lib.rs | 36 +++- network/src/network/mod.rs | 10 +- network/src/types/mod.rs | 21 +++ network/src/types/peer_id.rs | 7 + network/src/types/service.rs | 23 +-- network/src/util/mod.rs | 28 ++- network/src/util/router.rs | 2 +- 10 files changed, 301 insertions(+), 164 deletions(-) diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 4d137c5db..792e2b61c 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -1,137 +1,183 @@ use std::sync::{Arc, Mutex}; -use std::time::Instant; +use std::time::Duration; use anyhow::Result; use bytes::{Buf, Bytes}; -use futures_util::Future; use tl_proto::TlWrite; +use tycho_util::time::now_sec; -use self::routing::{RoutingTable, RoutingTableBuilder}; -use self::storage::{Storage, StorageBuilder}; +use self::routing::RoutingTable; +use self::storage::{Storage, StorageError}; +use crate::proto::dht; use crate::types::{PeerId, Response, Service}; -use crate::util::{BoxFutureOrNoop, Routable}; -use crate::{proto, InboundServiceRequest, Network}; +use crate::util::Routable; +use crate::{AddressList, InboundServiceRequest, Network, WeakNetwork}; + +pub use self::routing::RoutingTableBuilder; +pub use self::storage::StorageBuilder; mod routing; mod storage; -pub struct DhtBuilder { +pub struct DhtClientBuilder { + inner: Arc, + disable_background_tasks: bool, +} + +impl DhtClientBuilder { + pub fn disable_background_tasks(mut self) -> Self { + self.disable_background_tasks = true; + self + } + + pub fn build(self, network: Network) -> DhtClient { + if !self.disable_background_tasks { + self.inner + .start_background_tasks(Network::downgrade(&network)); + } + + DhtClient { + inner: self.inner, + network, + } + } +} + +#[derive(Clone)] +pub struct DhtClient { + inner: Arc, + network: Network, +} + +impl DhtClient { + 
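For orientation, a small usage sketch of the client half introduced above. The `client_builder`, `network`, and `bootstrap_info` bindings are assumed placeholders (a DhtClientBuilder returned by DhtService::builder(..).build(), an already built Network, and a signed Arc<dht::NodeInfo>); only the method names come from this patch.

    // Sketch only: finish the client half on an existing network, skipping the
    // background refresh loop (as a test might), then seed the routing table.
    let dht_client: DhtClient = client_builder
        .disable_background_tasks()
        .build(network);
    dht_client.add_peer(bootstrap_info);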
pub fn network(&self) -> &Network { + &self.network + } + + pub fn add_peer(&self, peer: Arc) { + self.inner.routing_table.lock().unwrap().add(peer); + } + + pub async fn find_peers(&self, key: &PeerId) -> Result> { + todo!() + } + + pub async fn find_value(&self, key: T) -> Result> + where + T: dht::WithValue, + { + todo!() + } +} + +pub struct DhtServiceBuilder { mandatory_fields: MandatoryFields, local_id: PeerId, } -impl DhtBuilder { +impl DhtServiceBuilder { pub fn build(self) -> (DhtClientBuilder, DhtService) { let (routing_table, storage) = self.mandatory_fields; let inner = Arc::new(DhtInner { local_id: self.local_id, routing_table: Mutex::new(routing_table), - last_table_refresh: Instant::now(), storage, node_info: Mutex::new(None), }); - (DhtClientBuilder(inner.clone()), DhtService(inner)) + let client_builder = DhtClientBuilder { + inner: inner.clone(), + disable_background_tasks: false, + }; + + (client_builder, DhtService(inner)) } } -impl DhtBuilder<(T1, ())> { - pub fn with_storage(self, f: F) -> DhtBuilder<(T1, Storage)> +impl DhtServiceBuilder<(T1, ())> { + pub fn with_storage(self, f: F) -> DhtServiceBuilder<(T1, Storage)> where F: FnOnce(StorageBuilder) -> StorageBuilder, { let (routing_table, _) = self.mandatory_fields; let storage = f(Storage::builder()).build(); - DhtBuilder { + DhtServiceBuilder { mandatory_fields: (routing_table, storage), local_id: self.local_id, } } } -impl DhtBuilder<((), T2)> { - pub fn with_routing_table(self, f: F) -> DhtBuilder<(RoutingTable, T2)> +impl DhtServiceBuilder<((), T2)> { + pub fn with_routing_table(self, f: F) -> DhtServiceBuilder<(RoutingTable, T2)> where F: FnOnce(RoutingTableBuilder) -> RoutingTableBuilder, { let routing_table = f(RoutingTable::builder(self.local_id)).build(); let (_, storage) = self.mandatory_fields; - DhtBuilder { + DhtServiceBuilder { mandatory_fields: (routing_table, storage), local_id: self.local_id, } } } -pub struct DhtClientBuilder(Arc); - -impl DhtClientBuilder { - pub fn build(self, network: Network) -> DhtClient { - // TODO: spawn background tasks here: - // - refresh routing table - // - update and broadcast node info - - DhtClient { - inner: self.0, - network, - } - } -} - +#[derive(Clone)] pub struct DhtService(Arc); impl DhtService { - pub fn builder(local_id: PeerId) -> DhtBuilder<((), ())> { - DhtBuilder { + pub fn builder(local_id: PeerId) -> DhtServiceBuilder<((), ())> { + DhtServiceBuilder { mandatory_fields: ((), ()), local_id, } } - - pub fn make_client(&self, network: Network) -> DhtClient { - DhtClient { - inner: self.0.clone(), - network, - } - } -} - -macro_rules! match_req { - ($req:ident, { - $($ty:path as $pat:pat => $expr:expr),*$(,)? 
- }) => {{ - let e = if $req.body.len() >= 4 { - match $req.body.as_ref().get_u32_le() { - $( - <$ty>::TL_ID => match tl_proto::deserialize::<$ty>(&$req.body) { - Ok($pat) => return ($expr).boxed_or_noop(), - Err(e) => e, - } - )* - _ => tl_proto::TlError::UnknownConstructor, - } - } else { - tl_proto::TlError::UnexpectedEof - }; - tracing::debug!("failed to deserialize request: {e:?}"); - BoxFutureOrNoop::Noop - }}; } impl Service> for DhtService { type QueryResponse = Response; - type OnQueryFuture = BoxFutureOrNoop>; + type OnQueryFuture = futures_util::future::Ready>; type OnMessageFuture = futures_util::future::Ready<()>; type OnDatagramFuture = futures_util::future::Ready<()>; fn on_query(&self, req: InboundServiceRequest) -> Self::OnQueryFuture { - match_req!(req, { - proto::dht::rpc::Store as req => self.0.clone().store(req), - proto::dht::rpc::FindNode as req => self.0.clone().find_node(req), - proto::dht::rpc::FindValue as req => self.0.clone().find_value(req), - proto::dht::rpc::GetNodeInfo as _ => self.0.clone().get_node_info(), - }) + let response = crate::match_tl_request!(req.body, { + dht::rpc::Store as r => match self.0.handle_store(r) { + Ok(res) => Some(tl_proto::serialize(res)), + Err(e) => { + tracing::debug!( + peer_id = %req.metadata.peer_id, + addr = %req.metadata.remote_address, + "failed to store value: {e:?}" + ); + None + } + }, + dht::rpc::FindNode as r => { + let res = self.0.handle_find_node(r); + Some(tl_proto::serialize(res)) + }, + dht::rpc::FindValue as r => { + let res = self.0.handle_find_value(r); + Some(tl_proto::serialize(res)) + }, + dht::rpc::GetNodeInfo as _ => { + self.0.handle_get_node_info().map(tl_proto::serialize) + }, + }, e => { + tracing::debug!( + peer_id = %req.metadata.peer_id, + addr = %req.metadata.remote_address, + "failed to deserialize request from: {e:?}" + ); + None + }); + + futures_util::future::ready(response.map(|body| Response { + version: Default::default(), + body: Bytes::from(body), + })) } #[inline] @@ -148,62 +194,91 @@ impl Service> for DhtService { impl Routable for DhtService { fn query_ids(&self) -> impl IntoIterator { [ - proto::dht::rpc::Store::TL_ID, - proto::dht::rpc::FindNode::TL_ID, - proto::dht::rpc::FindValue::TL_ID, - proto::dht::rpc::GetNodeInfo::TL_ID, + dht::rpc::Store::TL_ID, + dht::rpc::FindNode::TL_ID, + dht::rpc::FindValue::TL_ID, + dht::rpc::GetNodeInfo::TL_ID, ] } } -pub struct DhtClient { - inner: Arc, - network: Network, -} - -impl DhtClient { - pub async fn find_peers(&self, key: &PeerId) -> Result> { - todo!() - } - - pub async fn find_value(&self, key: T) -> Result> - where - T: proto::dht::WithValue, - { - todo!() - } -} - struct DhtInner { local_id: PeerId, routing_table: Mutex, - last_table_refresh: Instant, storage: Storage, - node_info: Mutex>, + node_info: Mutex>, } impl DhtInner { - async fn store(self: Arc, req: proto::dht::rpc::Store) -> Result { - self.storage.insert(&req.value)?; - Ok(proto::dht::Stored) + fn start_background_tasks(self: &Arc, network: WeakNetwork) { + const INFO_TTL: u32 = 3600; + const INFO_UPDATE_INTERVAL: Duration = Duration::from_secs(60); + const ANNOUNCE_EVERY_N_STEPS: usize = 10; + + let this = Arc::downgrade(self); + tokio::spawn(async move { + tracing::debug!("background DHT loop started"); + let mut interval = tokio::time::interval(INFO_UPDATE_INTERVAL); + let mut step = 0; + loop { + interval.tick().await; + let (Some(this), Some(network)) = (this.upgrade(), network.upgrade()) else { + break; + }; + + this.refresh_local_node_info(&network, INFO_TTL); + + 
step = (step + 1) % ANNOUNCE_EVERY_N_STEPS; + if step == 0 { + if let Err(e) = this.announce_local_node_info(&network).await { + tracing::error!("failed to announce local DHT node info: {e:?}"); + } + } + } + tracing::debug!("background DHT loop finished"); + }); + } + + fn refresh_local_node_info(&self, network: &Network, ttl: u32) { + let now = now_sec(); + let mut node_info = dht::NodeInfo { + id: self.local_id, + address_list: AddressList { + items: vec![network.local_addr().into()], + created_at: now, + expires_at: now + ttl, + }, + created_at: now, + signature: Bytes::new(), + }; + let signature = network.sign_tl(&node_info); + node_info.signature = signature.to_vec().into(); + + *self.node_info.lock().unwrap() = Some(node_info); + } + + async fn announce_local_node_info(self: &Arc, _network: &Network) -> Result<()> { + // TODO: store node info in the DHT + todo!() + } + + fn handle_store(&self, req: dht::rpc::Store) -> Result { + self.storage.insert(&req.value).map(|_| dht::Stored) } - async fn find_node(self: Arc, req: proto::dht::rpc::FindNode) -> Result { + fn handle_find_node(&self, req: dht::rpc::FindNode) -> NodeResponseRaw { let nodes = self .routing_table .lock() .unwrap() .closest(&req.key, req.k as usize); - Ok(NodeResponseRaw { nodes }) + NodeResponseRaw { nodes } } - async fn find_value( - self: Arc, - req: proto::dht::rpc::FindValue, - ) -> Result { + fn handle_find_value(&self, req: dht::rpc::FindValue) -> ValueResponseRaw { match self.storage.get(&req.key) { - Some(value) => Ok(ValueResponseRaw::Found(value)), + Some(value) => ValueResponseRaw::Found(value), None => { let nodes = self .routing_table @@ -211,60 +286,31 @@ impl DhtInner { .unwrap() .closest(&req.key, req.k as usize); - Ok(ValueResponseRaw::NotFound( - nodes.into_iter().map(|node| node.into()).collect(), - )) + ValueResponseRaw::NotFound(nodes.into_iter().map(|node| node.into()).collect()) } } } - async fn get_node_info(self: Arc) -> Result { - match self.node_info.lock().unwrap().as_ref() { - Some(info) => Ok(proto::dht::NodeInfoResponse { info: info.clone() }), - None => Err(anyhow::anyhow!("node info not available")), - } - } -} - -trait HandlerFuture { - fn boxed_or_noop(self) -> BoxFutureOrNoop; -} - -impl HandlerFuture>> for F -where - F: Future> + Send + 'static, - T: tl_proto::TlWrite, - E: std::fmt::Debug, -{ - fn boxed_or_noop(self) -> BoxFutureOrNoop>> { - BoxFutureOrNoop::Boxed(Box::pin(async move { - match self.await { - Ok(res) => Some(Response { - version: Default::default(), - body: Bytes::from(tl_proto::serialize(&res)), - }), - Err(e) => { - tracing::debug!("failed to handle request: {e:?}"); - // TODO: return error response here? - None - } - } - })) + fn handle_get_node_info(&self) -> Option { + self.node_info + .lock() + .unwrap() + .clone() + .map(|info| dht::NodeInfoResponse { info }) } } -#[derive(Debug, Clone, TlWrite)] +#[derive(TlWrite)] #[tl(boxed, scheme = "proto.tl")] enum ValueResponseRaw { #[tl(id = "dht.valueFound")] Found(Bytes), #[tl(id = "dht.valueNotFound")] - NotFound(Vec>), + NotFound(Vec>), } -#[derive(Debug, Clone, TlWrite)] +#[derive(TlWrite)] #[tl(boxed, id = "dht.nodesFound", scheme = "proto.tl")] -pub struct NodeResponseRaw { - /// List of nodes closest to the key. 
- pub nodes: Vec>, +struct NodeResponseRaw { + nodes: Vec>, } diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index f45a23c1b..1c1b85227 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -12,7 +12,7 @@ pub struct RoutingTableBuilder { } impl RoutingTableBuilder { - pub fn build(self) -> RoutingTable { + pub(crate) fn build(self) -> RoutingTable { RoutingTable { local_id: self.local_id, buckets: BTreeMap::default(), diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index b3249554c..6795cb500 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs @@ -59,7 +59,7 @@ impl Default for StorageBuilder { } impl StorageBuilder { - pub fn build(self) -> Storage { + pub(crate) fn build(self) -> Storage { fn weigher(_key: &StorageKeyId, value: &StoredValue) -> u32 { std::mem::size_of::() as u32 + std::mem::size_of::() as u32 diff --git a/network/src/lib.rs b/network/src/lib.rs index d60601305..b6f2ae842 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,6 +1,8 @@ pub use self::util::{NetworkExt, Routable, Router, RouterBuilder}; pub use config::{Config, QuicConfig}; -pub use dht::{DhtClient, DhtService, DhtBuilder}; +pub use dht::{ + DhtClient, DhtClientBuilder, DhtService, DhtServiceBuilder, RoutingTableBuilder, StorageBuilder, +}; pub use network::{Network, NetworkBuilder, Peer, WeakNetwork}; pub use types::{ service_datagram_fn, service_message_fn, service_query_fn, Address, AddressList, @@ -21,3 +23,35 @@ mod util; pub mod proto { pub mod dht; } + +#[doc(hidden)] +pub mod __internal { + pub use tl_proto; +} + +#[cfg(test)] +mod tests { + use std::net::Ipv4Addr; + + use super::*; + + #[tokio::test] + async fn init_works() { + let keypair = everscale_crypto::ed25519::KeyPair::generate(&mut rand::thread_rng()); + + let (dht_client, dht) = DhtService::builder(keypair.public_key.into()) + .with_storage(|builder| builder) + .with_routing_table(|builder| builder) + .build(); + + let router = Router::builder().route(dht).build(); + + let network = Network::builder() + .with_random_private_key() + .with_service_name("test-service") + .build((Ipv4Addr::LOCALHOST, 0), router) + .unwrap(); + + let _dht_client = dht_client.build(network); + } +} diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index 229053809..146715cac 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -69,10 +69,8 @@ impl NetworkBuilder<(T1, ())> { impl NetworkBuilder { pub fn build(self, bind_address: T, service: S) -> Result where - S: Service, QueryResponse = Response> - + Send - + Clone - + 'static, + S: Send + Sync + Clone + 'static, + S: Service, QueryResponse = Response>, { use socket2::{Domain, Protocol, Socket, Type}; @@ -231,6 +229,10 @@ impl Network { pub fn sign_raw(&self, data: &[u8]) -> [u8; 64] { self.0.keypair.sign_raw(data) } + + pub fn downgrade(this: &Self) -> WeakNetwork { + WeakNetwork(Arc::downgrade(&this.0)) + } } struct NetworkInner { diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index 8ac478235..c8e0ade7e 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -43,16 +43,37 @@ pub struct Request { pub body: T, } +impl> AsRef<[u8]> for Request { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + pub struct Response { pub version: Version, pub body: T, } +impl> AsRef<[u8]> for Response { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + pub struct InboundServiceRequest { pub metadata: Arc, pub body: T, } 
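The AsRef<[u8]> impls added in this hunk exist so the router can inspect any request wrapper uniformly when choosing a handler. A rough, hypothetical sketch of that dispatch step (mirroring the read_le_u32 helper in util/router.rs; the function name is illustrative, not part of the patch):

    // Illustration only: extract the little-endian TL constructor id that the
    // router keys its handler maps on.
    fn constructor_id<T: AsRef<[u8]>>(req: &T) -> Option<u32> {
        let body = req.as_ref();
        (body.len() >= 4).then(|| u32::from_le_bytes(body[..4].try_into().unwrap()))
    }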
+impl> AsRef<[u8]> for InboundServiceRequest { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + #[derive(Debug, Clone)] pub struct InboundRequestMeta { pub peer_id: PeerId, diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index c96e46251..289db00b8 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -65,6 +65,13 @@ impl FromStr for PeerId { } } +impl From for PeerId { + #[inline] + fn from(public_key: ed25519::PublicKey) -> Self { + Self(public_key.to_bytes()) + } +} + impl std::ops::BitXor for PeerId { type Output = PeerId; diff --git a/network/src/types/service.rs b/network/src/types/service.rs index bbe7aefb0..4a7dcb402 100644 --- a/network/src/types/service.rs +++ b/network/src/types/service.rs @@ -27,7 +27,7 @@ pub trait ServiceExt: Service { #[inline] fn boxed(self) -> BoxService where - Self: Sized + Send + 'static, + Self: Sized + Send + Sync + 'static, Self::OnQueryFuture: Send + 'static, Self::OnMessageFuture: Send + 'static, Self::OnDatagramFuture: Send + 'static, @@ -38,7 +38,7 @@ pub trait ServiceExt: Service { #[inline] fn boxed_clone(self) -> BoxCloneService where - Self: Clone + Sized + Send + 'static, + Self: Clone + Sized + Send + Sync + 'static, Self::OnQueryFuture: Send + 'static, Self::OnMessageFuture: Send + 'static, Self::OnDatagramFuture: Send + 'static, @@ -47,7 +47,7 @@ pub trait ServiceExt: Service { } } -impl ServiceExt for T where T: Service + Send + ?Sized {} +impl ServiceExt for T where T: Service + ?Sized {} impl<'a, S, Request> Service for &'a S where @@ -135,12 +135,13 @@ type DynBoxService = dyn Service< OnQueryFuture = BoxFuture<'static, Option>, OnMessageFuture = BoxFuture<'static, ()>, OnDatagramFuture = BoxFuture<'static, ()>, - > + Send; + > + Send + + Sync; impl BoxService { pub fn new(inner: S) -> Self where - S: Service + Send + 'static, + S: Service + Send + Sync + 'static, S::OnQueryFuture: Send + 'static, S::OnMessageFuture: Send + 'static, S::OnDatagramFuture: Send + 'static, @@ -188,7 +189,8 @@ type DynBoxCloneService = dyn CloneService< OnQueryFuture = BoxFuture<'static, Option>, OnMessageFuture = BoxFuture<'static, ()>, OnDatagramFuture = BoxFuture<'static, ()>, - > + Send; + > + Send + + Sync; impl BoxCloneService where @@ -196,7 +198,7 @@ where { pub fn new(inner: S) -> Self where - S: Service + Clone + Send + 'static, + S: Service + Clone + Send + Sync + 'static, S::OnQueryFuture: Send + 'static, S::OnMessageFuture: Send + 'static, S::OnDatagramFuture: Send + 'static, @@ -235,7 +237,7 @@ where impl Clone for BoxCloneService where - Q: Send + 'static, + Q: Send + Sync + 'static, { fn clone(&self) -> Self { BoxCloneService { @@ -250,7 +252,7 @@ trait CloneService: Service { impl CloneService for S where - S: Service + Clone + Send + 'static, + S: Service + Clone + Send + Sync + 'static, S::OnQueryFuture: Send + 'static, S::OnMessageFuture: Send + 'static, S::OnDatagramFuture: Send + 'static, @@ -266,7 +268,8 @@ type DynCloneService = dyn CloneService< OnQueryFuture = >::OnQueryFuture, OnMessageFuture = >::OnMessageFuture, OnDatagramFuture = >::OnDatagramFuture, - > + Send; + > + Send + + Sync; #[repr(transparent)] struct BoxPinFutures(S); diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs index 7871938d7..d7d1fcb1b 100644 --- a/network/src/util/mod.rs +++ b/network/src/util/mod.rs @@ -1,7 +1,31 @@ +pub use self::futures::BoxFutureOrNoop; pub use self::router::{Routable, Router, RouterBuilder}; pub use self::traits::NetworkExt; -pub use 
self::futures::BoxFutureOrNoop; +mod futures; mod router; mod traits; -mod futures; + +#[macro_export] +macro_rules! match_tl_request { + ($req_body:expr, { + $($ty:path as $pat:pat => $expr:expr),*$(,)? + }, $err:pat => $err_exr:expr) => { + '__match_req: { + let $err = if ($req_body).len() >= 4 { + match ($req_body).as_ref().get_u32_le() { + $( + <$ty>::TL_ID => match $crate::__internal::tl_proto::deserialize::<$ty>(&($req_body)) { + Ok($pat) => break '__match_req ($expr), + Err(e) => e, + } + )* + _ => $crate::__internal::tl_proto::TlError::UnknownConstructor, + } + } else { + $crate::__internal::tl_proto::TlError::UnexpectedEof + }; + $err_exr + } + }; +} diff --git a/network/src/util/router.rs b/network/src/util/router.rs index f83a4bd97..25e1d6722 100644 --- a/network/src/util/router.rs +++ b/network/src/util/router.rs @@ -30,7 +30,7 @@ pub struct RouterBuilder { impl RouterBuilder { pub fn route(mut self, service: S) -> Self where - S: Service + Routable + Send + 'static, + S: Service + Routable + Send + Sync + 'static, { let index = self.inner.services.len(); for id in service.query_ids() { From e01e02a2ed66946eee72c51639872fc50a5f41dd Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 12 Feb 2024 13:26:18 +0100 Subject: [PATCH 16/35] network: Use unistream for queries with unit response --- network/src/dht/mod.rs | 44 ++++++++++------ network/src/dht/query.rs | 30 +++++++++++ network/src/network/request_handler.rs | 69 ++++++++++++++++++++++++-- network/src/proto.tl | 8 +-- network/src/proto/dht.rs | 9 ---- util/src/lib.rs | 3 ++ 6 files changed, 127 insertions(+), 36 deletions(-) create mode 100644 network/src/dht/query.rs diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 792e2b61c..79a055e06 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -16,6 +16,7 @@ use crate::{AddressList, InboundServiceRequest, Network, WeakNetwork}; pub use self::routing::RoutingTableBuilder; pub use self::storage::StorageBuilder; +mod query; mod routing; mod storage; @@ -143,17 +144,6 @@ impl Service> for DhtService { fn on_query(&self, req: InboundServiceRequest) -> Self::OnQueryFuture { let response = crate::match_tl_request!(req.body, { - dht::rpc::Store as r => match self.0.handle_store(r) { - Ok(res) => Some(tl_proto::serialize(res)), - Err(e) => { - tracing::debug!( - peer_id = %req.metadata.peer_id, - addr = %req.metadata.remote_address, - "failed to store value: {e:?}" - ); - None - } - }, dht::rpc::FindNode as r => { let res = self.0.handle_find_node(r); Some(tl_proto::serialize(res)) @@ -169,7 +159,7 @@ impl Service> for DhtService { tracing::debug!( peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address, - "failed to deserialize request from: {e:?}" + "failed to deserialize query from: {e:?}" ); None }); @@ -181,7 +171,26 @@ impl Service> for DhtService { } #[inline] - fn on_message(&self, _req: InboundServiceRequest) -> Self::OnMessageFuture { + fn on_message(&self, req: InboundServiceRequest) -> Self::OnMessageFuture { + crate::match_tl_request!(req.body, { + dht::rpc::Store as r => match self.0.handle_store(r) { + Ok(_) => {}, + Err(e) => { + tracing::debug!( + peer_id = %req.metadata.peer_id, + addr = %req.metadata.remote_address, + "failed to store value: {e:?}" + ); + } + } + }, e => { + tracing::debug!( + peer_id = %req.metadata.peer_id, + addr = %req.metadata.remote_address, + "failed to deserialize message from: {e:?}" + ); + }); + futures_util::future::ready(()) } @@ -194,12 +203,15 @@ impl Service> for DhtService { impl Routable 
for DhtService { fn query_ids(&self) -> impl IntoIterator { [ - dht::rpc::Store::TL_ID, dht::rpc::FindNode::TL_ID, dht::rpc::FindValue::TL_ID, dht::rpc::GetNodeInfo::TL_ID, ] } + + fn message_ids(&self) -> impl IntoIterator { + [dht::rpc::Store::TL_ID] + } } struct DhtInner { @@ -262,8 +274,8 @@ impl DhtInner { todo!() } - fn handle_store(&self, req: dht::rpc::Store) -> Result { - self.storage.insert(&req.value).map(|_| dht::Stored) + fn handle_store(&self, req: dht::rpc::Store) -> Result { + self.storage.insert(&req.value) } fn handle_find_node(&self, req: dht::rpc::FindNode) -> NodeResponseRaw { diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs new file mode 100644 index 000000000..ad43bdd48 --- /dev/null +++ b/network/src/dht/query.rs @@ -0,0 +1,30 @@ +use std::time::Duration; + +use bytes::Bytes; +use tycho_util::FastHashSet; + +use crate::network::Network; +use crate::types::{PeerId, Request}; +use crate::util::NetworkExt; + +pub struct Query { + network: Network, + request_body: Bytes, + visited: FastHashSet, + timeout: Duration, +} + +// impl Query { +// fn visit(&mut self, peer: &PeerId) { +// tokio::time::timeout( +// self.timeout, +// self.network.query( +// peer_id, +// Request { +// version: Default::default(), +// body: self.request_body, +// }, +// ), +// ) +// } +// } diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs index 1188d47ba..240561578 100644 --- a/network/src/network/request_handler.rs +++ b/network/src/network/request_handler.rs @@ -44,7 +44,16 @@ impl InboundRequestHandler { let reason: quinn::ConnectionError = loop { tokio::select! { uni = self.connection.accept_uni() => match uni { - Ok(stream) => tracing::trace!(id = %stream.id(), "incoming uni stream"), + Ok(stream) => { + tracing::trace!(id = %stream.id(), "incoming uni stream"); + let handler = UniStreamRequestHandler::new( + &self.config, + self.connection.request_meta().clone(), + self.service.clone(), + stream, + ); + inflight_requests.spawn(handler.handle()); + }, Err(e) => { tracing::trace!("failed to accept an incoming uni stream: {e:?}"); break e; @@ -68,7 +77,22 @@ impl InboundRequestHandler { } }, datagram = self.connection.read_datagram() => match datagram { - Ok(datagram) => tracing::trace!(byte_len = datagram.len(), "incoming datagram"), + Ok(datagram) => { + tracing::trace!(byte_len = datagram.len(), "incoming datagram"); + + inflight_requests.spawn({ + let metadata = self.connection.request_meta().clone(); + let service = self.service.clone(); + async move { + service + .on_datagram(InboundServiceRequest { + metadata, + body: datagram, + }) + .await + } + }); + }, Err(e) => { tracing::trace!("failed to read datagram: {e:?}"); break e; @@ -79,9 +103,8 @@ impl InboundRequestHandler { Err(e) => { if e.is_panic() { std::panic::resume_unwind(e.into_panic()); - } else { - tracing::trace!("request handler task cancelled"); } + tracing::trace!("request handler task cancelled"); } } } @@ -98,6 +121,44 @@ impl InboundRequestHandler { } } +struct UniStreamRequestHandler { + meta: Arc, + service: BoxCloneService, Response>, + recv_stream: FramedRead, +} + +impl UniStreamRequestHandler { + fn new( + config: &Config, + meta: Arc, + service: BoxCloneService, Response>, + recv_stream: RecvStream, + ) -> Self { + Self { + meta, + service, + recv_stream: FramedRead::new(recv_stream, make_codec(config)), + } + } + + async fn handle(self) { + if let Err(e) = self.do_handle().await { + tracing::trace!("request handler task failed: {e:?}"); + } + } + + async 
fn do_handle(mut self) -> Result<()> { + let req = recv_request(&mut self.recv_stream).await?; + self.service + .on_query(InboundServiceRequest { + metadata: self.meta, + body: req.body, + }) + .await; + Ok(()) + } +} + struct BiStreamRequestHandler { meta: Arc, service: BoxCloneService, Response>, diff --git a/network/src/proto.tl b/network/src/proto.tl index 84b67f2ce..73bfa9cd0 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -91,12 +91,6 @@ dht.signedValue key:dht.signedKey data:bytes expires_at:int signature:bytes = dh dht.overlayValue key:dht.overlayKey data:bytes expires_at:int = dht.Value; -/** -* A response for the `dht.store` query -*/ -dht.stored = dht.Stored; - - /** * A response for the `dht.findNode` query * @@ -133,7 +127,7 @@ dht.nodeInfoFound info:dht.node = dht.NodeInfoResponse; * * @param value value to store */ -dht.store value:dht.Value = dht.Stored; +dht.store value:dht.Value = True; /** * Searches for k closest nodes * diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index 3e56d6785..ffa4eff42 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -149,11 +149,6 @@ impl<'a> TlRead<'a> for Value { } } -/// A response for the [`rpc::Store`] query. -#[derive(Debug, Clone, Copy, TlRead, TlWrite)] -#[tl(boxed, id = "dht.stored", scheme = "proto.tl")] -pub struct Stored; - /// A response for the [`rpc::FindNode`] query. #[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = "dht.nodesFound", scheme = "proto.tl")] @@ -198,10 +193,6 @@ pub mod rpc { pub value: Value, } - impl RpcQuery for Store { - type Response = Stored; - } - /// Search for `k` closest nodes. /// /// See [`NodeResponse`]. diff --git a/util/src/lib.rs b/util/src/lib.rs index 25dd5fe8f..71786a6c3 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,6 +1,9 @@ use std::collections::HashMap; +use std::collections::HashSet; pub mod time; pub type FastDashMap = dashmap::DashMap; +pub type FastDashSet = dashmap::DashSet; pub type FastHashMap = HashMap; +pub type FastHashSet = HashSet; From 8757dcadf438498426803728491668ba8fcc6eec Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 12 Feb 2024 17:35:47 +0100 Subject: [PATCH 17/35] network: Impl find dht nodes query --- network/src/dht/mod.rs | 86 +++++++++------ network/src/dht/query.rs | 196 +++++++++++++++++++++++++++++++---- network/src/dht/routing.rs | 94 +++++++++-------- network/src/lib.rs | 5 +- network/src/types/mod.rs | 33 ++++++ network/src/types/peer_id.rs | 6 ++ 6 files changed, 324 insertions(+), 96 deletions(-) diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 79a055e06..33818a10a 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -6,14 +6,14 @@ use bytes::{Buf, Bytes}; use tl_proto::TlWrite; use tycho_util::time::now_sec; +use self::query::FindNodesQuery; use self::routing::RoutingTable; use self::storage::{Storage, StorageError}; +use crate::network::{Network, WeakNetwork}; use crate::proto::dht; -use crate::types::{PeerId, Response, Service}; -use crate::util::Routable; -use crate::{AddressList, InboundServiceRequest, Network, WeakNetwork}; +use crate::types::{AddressList, InboundServiceRequest, PeerId, Request, Response, Service}; +use crate::util::{NetworkExt, Routable}; -pub use self::routing::RoutingTableBuilder; pub use self::storage::StorageBuilder; mod query; @@ -56,11 +56,24 @@ impl DhtClient { } pub fn add_peer(&self, peer: Arc) { - self.inner.routing_table.lock().unwrap().add(peer); + self.inner + .routing_table + .lock() + .unwrap() + .add(peer, 
self.inner.max_k, &self.inner.node_ttl); } - pub async fn find_peers(&self, key: &PeerId) -> Result> { - todo!() + pub async fn find_peers(&self, key: &PeerId) -> Result>> { + let max_k = self.inner.max_k; + let closest_nodes = { + let routing_table = self.inner.routing_table.lock().unwrap(); + routing_table.closest(key.as_bytes(), max_k) + }; + // TODO: deduplicate shared futures + let nodes = FindNodesQuery::new(self.network.clone(), key, closest_nodes, max_k) + .run() + .await; + Ok(nodes.into_values().collect()) } pub async fn find_value(&self, key: T) -> Result> @@ -69,22 +82,34 @@ impl DhtClient { { todo!() } + + pub async fn get_node_info(&self, peer_id: &PeerId) -> Result { + self.network + .query(peer_id, Request::from_tl(dht::rpc::GetNodeInfo)) + .await? + .parse_tl() + .map_err(Into::into) + } } -pub struct DhtServiceBuilder { +pub struct DhtServiceBuilder { mandatory_fields: MandatoryFields, local_id: PeerId, + node_ttl: Duration, + max_k: usize, } impl DhtServiceBuilder { pub fn build(self) -> (DhtClientBuilder, DhtService) { - let (routing_table, storage) = self.mandatory_fields; + let storage = self.mandatory_fields; let inner = Arc::new(DhtInner { local_id: self.local_id, - routing_table: Mutex::new(routing_table), + routing_table: Mutex::new(RoutingTable::new(self.local_id)), storage, node_info: Mutex::new(None), + max_k: self.max_k, + node_ttl: self.node_ttl, }); let client_builder = DhtClientBuilder { @@ -94,32 +119,29 @@ impl DhtServiceBuilder { (client_builder, DhtService(inner)) } -} -impl DhtServiceBuilder<(T1, ())> { - pub fn with_storage(self, f: F) -> DhtServiceBuilder<(T1, Storage)> - where - F: FnOnce(StorageBuilder) -> StorageBuilder, - { - let (routing_table, _) = self.mandatory_fields; - let storage = f(Storage::builder()).build(); - DhtServiceBuilder { - mandatory_fields: (routing_table, storage), - local_id: self.local_id, - } + pub fn with_max_k(mut self, max_k: usize) -> Self { + self.max_k = max_k; + self + } + + pub fn with_node_ttl(mut self, ttl: Duration) -> Self { + self.node_ttl = ttl; + self } } -impl DhtServiceBuilder<((), T2)> { - pub fn with_routing_table(self, f: F) -> DhtServiceBuilder<(RoutingTable, T2)> +impl DhtServiceBuilder<()> { + pub fn with_storage(self, f: F) -> DhtServiceBuilder where - F: FnOnce(RoutingTableBuilder) -> RoutingTableBuilder, + F: FnOnce(StorageBuilder) -> StorageBuilder, { - let routing_table = f(RoutingTable::builder(self.local_id)).build(); - let (_, storage) = self.mandatory_fields; + let storage = f(Storage::builder()).build(); DhtServiceBuilder { - mandatory_fields: (routing_table, storage), + mandatory_fields: storage, local_id: self.local_id, + node_ttl: self.node_ttl, + max_k: self.max_k, } } } @@ -128,10 +150,12 @@ impl DhtServiceBuilder<((), T2)> { pub struct DhtService(Arc); impl DhtService { - pub fn builder(local_id: PeerId) -> DhtServiceBuilder<((), ())> { + pub fn builder(local_id: PeerId) -> DhtServiceBuilder<()> { DhtServiceBuilder { - mandatory_fields: ((), ()), + mandatory_fields: (), local_id, + node_ttl: Duration::from_secs(15 * 60), + max_k: 20, } } } @@ -219,6 +243,8 @@ struct DhtInner { routing_table: Mutex, storage: Storage, node_info: Mutex>, + max_k: usize, + node_ttl: Duration, } impl DhtInner { diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index ad43bdd48..a11f5b864 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -1,30 +1,188 @@ +use std::collections::hash_map; +use std::sync::Arc; use std::time::Duration; +use ahash::HashMapExt; +use 
anyhow::Result; use bytes::Bytes; -use tycho_util::FastHashSet; +use futures_util::stream::FuturesUnordered; +use futures_util::StreamExt; +use tycho_util::time::now_sec; +use tycho_util::FastHashMap; +use crate::dht::routing::RoutingTable; use crate::network::Network; +use crate::proto::dht; use crate::types::{PeerId, Request}; use crate::util::NetworkExt; -pub struct Query { +pub struct FindNodesQuery { network: Network, - request_body: Bytes, - visited: FastHashSet, - timeout: Duration, + candidates: RoutingTable, + max_k: usize, } -// impl Query { -// fn visit(&mut self, peer: &PeerId) { -// tokio::time::timeout( -// self.timeout, -// self.network.query( -// peer_id, -// Request { -// version: Default::default(), -// body: self.request_body, -// }, -// ), -// ) -// } -// } +impl FindNodesQuery { + pub fn new( + network: Network, + target: &PeerId, + nodes: Vec>, + max_k: usize, + ) -> Self { + let mut candidates = RoutingTable::new(*target); + for node in nodes { + candidates.add(node, max_k, &Duration::MAX); + } + + Self { + network, + candidates, + max_k, + } + } + + pub async fn run(mut self) -> FastHashMap> { + // Prepare shared request + let request_body = Bytes::from(tl_proto::serialize(dht::rpc::FindNode { + k: self.max_k as u32, + key: self.candidates.local_id().to_bytes(), + })); + + // Prepare request to initial candidates + let mut futures = FuturesUnordered::new(); + self.candidates + .visit_closest(self.candidates.local_id().as_bytes(), self.max_k, |node| { + futures.push(Self::visit( + self.network.clone(), + node.clone(), + request_body.clone(), + )); + }); + + // Process responses and refill futures until all peers are traversed + let mut result = FastHashMap::>::new(); + while let Some((node, res)) = futures.next().await { + match res { + // Refill futures from the nodes response + Some(Ok(nodes)) => { + tracing::debug!(peer_id = %node.id, count = nodes.len(), "received nodes"); + if !self.update_candidates(now_sec(), self.max_k, nodes, &mut result) { + // Do nothing if candidates were not changed + continue; + } + + // Add new nodes from the closest range + self.candidates.visit_closest( + self.candidates.local_id().as_bytes(), + self.max_k, + |node| { + if result.contains_key(&node.id) { + // Skip already visited nodes + return; + } + futures.push(Self::visit( + self.network.clone(), + node.clone(), + request_body.clone(), + )); + }, + ); + } + // Do nothing on error + Some(Err(e)) => { + tracing::warn!(peer_id = %node.id, "failed to query nodes: {e:?}"); + } + // Do nothing on timeout + None => { + tracing::warn!(peer_id = %node.id, "failed to query nodes: timeout"); + } + } + } + + // Done + result + } + + fn update_candidates( + &mut self, + now: u32, + max_k: usize, + nodes: Vec, + result: &mut FastHashMap>, + ) -> bool { + let mut has_new = false; + for node in nodes { + // Skip invalid entries + if !validate_node_info(now, &node) { + continue; + } + + match result.entry(node.id) { + // Insert a new entry + hash_map::Entry::Vacant(entry) => { + let node = entry.insert(Arc::new(node)).clone(); + self.candidates.add(node, max_k, &Duration::MAX); + has_new = true; + } + // Try to replace an old entry + hash_map::Entry::Occupied(mut entry) => { + if entry.get().created_at < node.created_at { + *entry.get_mut() = Arc::new(node); + } + } + } + } + + has_new + } + + async fn visit( + network: Network, + node: Arc, + request_body: Bytes, + ) -> (Arc, Option>) { + let req = network.query( + &node.id, + Request { + version: Default::default(), + body: 
request_body.clone(), + }, + ); + + let res = match tokio::time::timeout(REQUEST_TIMEOUT, req).await { + Ok(res) => Some(res.and_then(|res| { + tl_proto::deserialize::(&res.body) + .map_err(Into::into) + .map(|res| res.nodes) + })), + Err(_) => None, + }; + + (node, res) + } +} + +fn validate_node_info(now: u32, info: &dht::NodeInfo) -> bool { + const CLOCK_THRESHOLD: u32 = 1; + if info.created_at > now + CLOCK_THRESHOLD + || info.address_list.created_at > now + CLOCK_THRESHOLD + || info.address_list.expires_at >= now + || info.address_list.items.is_empty() + { + return false; + } + + let Some(pubkey) = info.id.as_public_key() else { + return false; + }; + + let Ok::<&[u8; 64], _>(signature) = info.signature.as_ref().try_into() else { + return false; + }; + + pubkey.verify(info, signature) +} + +const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); + +type Nodes = Vec; diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index 1c1b85227..b6b96defd 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -5,45 +5,32 @@ use std::time::{Duration, Instant}; use crate::proto::dht; use crate::types::PeerId; -pub struct RoutingTableBuilder { +pub struct RoutingTable { local_id: PeerId, - max_k: usize, - node_timeout: Duration, + buckets: BTreeMap, } -impl RoutingTableBuilder { - pub(crate) fn build(self) -> RoutingTable { - RoutingTable { - local_id: self.local_id, - buckets: BTreeMap::default(), - max_k: self.max_k, - node_timeout: self.node_timeout, +impl RoutingTable { + pub fn new(local_id: PeerId) -> Self { + Self { + local_id, + buckets: Default::default(), } } - pub fn with_node_timeout(mut self, timeout: Duration) -> Self { - self.node_timeout = timeout; - self + pub fn local_id(&self) -> &PeerId { + &self.local_id } -} -pub struct RoutingTable { - local_id: PeerId, - buckets: BTreeMap, - max_k: usize, - node_timeout: Duration, -} + pub fn is_empty(&self) -> bool { + self.buckets.values().all(Bucket::is_empty) + } -impl RoutingTable { - pub fn builder(local_id: PeerId) -> RoutingTableBuilder { - RoutingTableBuilder { - local_id, - max_k: 20, - node_timeout: Duration::from_secs(15 * 60), - } + pub fn len(&self) -> usize { + self.buckets.values().map(|bucket| bucket.nodes.len()).sum() } - pub fn add(&mut self, node: Arc) -> bool { + pub fn add(&mut self, node: Arc, max_k: usize, node_ttl: &Duration) -> bool { let distance = distance(&self.local_id, &node.id); if distance == 0 { return false; @@ -51,8 +38,8 @@ impl RoutingTable { self.buckets .entry(distance) - .or_insert_with(|| Bucket::with_capacity(self.max_k)) - .insert(node, self.max_k, &self.node_timeout) + .or_insert_with(|| Bucket::with_capacity(max_k)) + .insert(node, max_k, &node_ttl) } pub fn remove(&mut self, key: &PeerId) -> bool { @@ -65,7 +52,6 @@ impl RoutingTable { } pub fn closest(&self, key: &[u8; 32], count: usize) -> Vec> { - let count = count.min(self.max_k); if count == 0 { return Vec::new(); } @@ -91,12 +77,32 @@ impl RoutingTable { result } - pub fn is_empty(&self) -> bool { - self.buckets.values().all(Bucket::is_empty) - } + pub fn visit_closest(&self, key: &[u8; 32], count: usize, mut f: F) + where + F: FnMut(&Arc), + { + if count == 0 { + return; + } - pub fn len(&self) -> usize { - self.buckets.values().map(|bucket| bucket.nodes.len()).sum() + let distance = distance(&self.local_id, PeerId::wrap(key)); + + let mut processed = 0; + + // Search for closest nodes first + for i in (distance..=MAX_DISTANCE).chain((0..distance).rev()) { + let remaining = match 
count.checked_sub(processed) { + None | Some(0) => break, + Some(n) => n, + }; + + if let Some(bucket) = self.buckets.get(&i) { + for node in bucket.nodes.iter().take(remaining) { + f(&node.data); + processed += 1; + } + } + } } pub fn contains(&self, key: &PeerId) -> bool { @@ -197,6 +203,8 @@ mod tests { use super::*; + const MAX_K: usize = 20; + fn make_node(id: PeerId) -> Arc { Arc::new(dht::NodeInfo { id, @@ -212,20 +220,20 @@ mod tests { #[test] fn buckets_are_sets() { - let mut table = RoutingTable::builder(PeerId::random()).build(); + let mut table = RoutingTable::new(PeerId::random()); let peer = PeerId::random(); - assert!(table.add(make_node(peer))); - assert!(table.add(make_node(peer))); // returns true because the node was updated + assert!(table.add(make_node(peer), MAX_K, &Duration::MAX)); + assert!(table.add(make_node(peer), MAX_K, &Duration::MAX)); // returns true because the node was updated assert_eq!(table.len(), 1); } #[test] fn sould_not_add_seld() { let local_id = PeerId::random(); - let mut table = RoutingTable::builder(local_id).build(); + let mut table = RoutingTable::new(local_id); - assert!(!table.add(make_node(local_id))); + assert!(!table.add(make_node(local_id), MAX_K, &Duration::MAX)); assert!(table.is_empty()); } @@ -355,9 +363,9 @@ mod tests { PeerId::from_str("bdbc554024c65b463b0f0a01037b55985190f4fc01c47dc81c19aab4b4b2d9ab") .unwrap(); - let mut table = RoutingTable::builder(local_id).build(); + let mut table = RoutingTable::new(local_id); for id in ids { - table.add(make_node(id)); + table.add(make_node(id), MAX_K, &Duration::MAX); } { diff --git a/network/src/lib.rs b/network/src/lib.rs index b6f2ae842..8cc88c882 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,8 +1,6 @@ pub use self::util::{NetworkExt, Routable, Router, RouterBuilder}; pub use config::{Config, QuicConfig}; -pub use dht::{ - DhtClient, DhtClientBuilder, DhtService, DhtServiceBuilder, RoutingTableBuilder, StorageBuilder, -}; +pub use dht::{DhtClient, DhtClientBuilder, DhtService, DhtServiceBuilder, StorageBuilder}; pub use network::{Network, NetworkBuilder, Peer, WeakNetwork}; pub use types::{ service_datagram_fn, service_message_fn, service_query_fn, Address, AddressList, @@ -41,7 +39,6 @@ mod tests { let (dht_client, dht) = DhtService::builder(keypair.public_key.into()) .with_storage(|builder| builder) - .with_routing_table(|builder| builder) .build(); let router = Router::builder().route(dht).build(); diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index c8e0ade7e..db9d8f8fe 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -1,6 +1,8 @@ use std::net::SocketAddr; use std::sync::Arc; +use bytes::Bytes; + pub use self::address::{Address, AddressList}; pub use self::peer_id::{Direction, PeerId}; pub use self::rpc::RpcQuery; @@ -43,6 +45,18 @@ pub struct Request { pub body: T, } +impl Request { + pub fn from_tl(body: T) -> Self + where + T: tl_proto::TlWrite, + { + Self { + version: Default::default(), + body: tl_proto::serialize(body).into(), + } + } +} + impl> AsRef<[u8]> for Request { #[inline] fn as_ref(&self) -> &[u8] { @@ -55,6 +69,25 @@ pub struct Response { pub body: T, } +impl Response { + pub fn from_tl(body: T) -> Self + where + T: tl_proto::TlWrite, + { + Self { + version: Default::default(), + body: tl_proto::serialize(body).into(), + } + } + + pub fn parse_tl(self) -> tl_proto::TlResult + where + for<'a> T: tl_proto::TlRead<'a>, + { + tl_proto::deserialize(self.body.as_ref()) + } +} + impl> AsRef<[u8]> for Response { 
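The RoutingTable added in this patch keys its buckets by the result of the `distance` helper between the local peer id and a candidate id, and `visit_closest` walks buckets outward from the target's bucket. The helper itself is not part of these hunks; as a rough sketch only (an assumption about a typical Kademlia-style metric, not the patch's actual code), the bucket index can be derived from the most significant differing bit of the XOR of the two ids:

fn bucket_distance(a: &[u8; 32], b: &[u8; 32]) -> usize {
    // Index of the most significant differing bit, mapped into 1..=256.
    // Identical ids yield 0, which is why `RoutingTable::add` rejects the local id.
    for (i, (x, y)) in a.iter().zip(b.iter()).enumerate() {
        let diff = x ^ y;
        if diff != 0 {
            return 256 - (i * 8 + diff.leading_zeros() as usize);
        }
    }
    0
}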
#[inline] fn as_ref(&self) -> &[u8] { diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index 289db00b8..b429122e6 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -15,10 +15,16 @@ impl PeerId { unsafe { &*(bytes as *const [u8; 32]).cast::() } } + #[inline] pub fn as_bytes(&self) -> &[u8; 32] { &self.0 } + #[inline] + pub fn to_bytes(self) -> [u8; 32] { + self.0 + } + pub fn as_public_key(&self) -> Option { ed25519::PublicKey::from_bytes(self.0) } From f69ed53dbc60b2a34bec21fdff28f242a4b20dff Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 12 Feb 2024 18:31:56 +0100 Subject: [PATCH 18/35] network: Impl find dht value query --- network/src/dht/mod.rs | 72 ++++++---- network/src/dht/query.rs | 192 +++++++++++++++++++------ network/src/dht/routing.rs | 2 +- network/src/dht/storage.rs | 2 +- network/src/network/request_handler.rs | 2 +- network/src/proto/dht.rs | 18 ++- 6 files changed, 210 insertions(+), 78 deletions(-) diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 33818a10a..a664dbdb9 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -6,7 +6,7 @@ use bytes::{Buf, Bytes}; use tl_proto::TlWrite; use tycho_util::time::now_sec; -use self::query::FindNodesQuery; +use self::query::Query; use self::routing::RoutingTable; use self::storage::{Storage, StorageError}; use crate::network::{Network, WeakNetwork}; @@ -63,32 +63,45 @@ impl DhtClient { .add(peer, self.inner.max_k, &self.inner.node_ttl); } - pub async fn find_peers(&self, key: &PeerId) -> Result>> { + pub async fn get_node_info(&self, peer_id: &PeerId) -> Result { + self.network + .query(peer_id, Request::from_tl(dht::rpc::GetNodeInfo)) + .await? + .parse_tl() + .map_err(Into::into) + } + + pub async fn find_peers(&self, target_id: &[u8; 32]) -> Result>> { let max_k = self.inner.max_k; let closest_nodes = { let routing_table = self.inner.routing_table.lock().unwrap(); - routing_table.closest(key.as_bytes(), max_k) + routing_table.closest(target_id, max_k) }; // TODO: deduplicate shared futures - let nodes = FindNodesQuery::new(self.network.clone(), key, closest_nodes, max_k) - .run() + let nodes = Query::new(self.network.clone(), target_id, closest_nodes, max_k) + .find_peers() .await; Ok(nodes.into_values().collect()) } - pub async fn find_value(&self, key: T) -> Result> + pub async fn find_value(&self, key: &T) -> Option>> where T: dht::WithValue, { - todo!() + let res = self.find_value_impl(&tl_proto::hash(key)).await?; + Some(res.and_then(|value| T::parse_value(value).map_err(Into::into))) } - pub async fn get_node_info(&self, peer_id: &PeerId) -> Result { - self.network - .query(peer_id, Request::from_tl(dht::rpc::GetNodeInfo)) - .await? 
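As a usage sketch only, not part of the patch: the client API above is wired into a node roughly as in the existing lib.rs test, with the service built first and then routed. The function name `bootstrap_dht` is hypothetical, and building the `Network` and the final `DhtClient` is elided because those steps are not shown in these hunks.

async fn bootstrap_dht(local_id: PeerId) -> Result<()> {
    // Build the DHT service with the defaults set in `DhtService::builder`
    // (max_k = 20, node TTL = 15 minutes).
    let (client_builder, dht) = DhtService::builder(local_id)
        .with_storage(|builder| builder)
        .build();

    // Serve inbound DHT queries and messages through the common router.
    let router = Router::builder().route(dht).build();

    // Elided (not shown in these hunks): build the `Network` on top of `router`,
    // turn `client_builder` into a `DhtClient`, then for example:
    // dht_client.add_peer(bootstrap_node_info);
    // let peers = dht_client.find_peers(local_id.as_bytes()).await?;

    let _ = (client_builder, router);
    Ok(())
}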
- .parse_tl() - .map_err(Into::into) + async fn find_value_impl(&self, hash: &[u8; 32]) -> Option>> { + let max_k = self.inner.max_k; + let closest_nodes = { + let routing_table = self.inner.routing_table.lock().unwrap(); + routing_table.closest(hash, max_k) + }; + // TODO: deduplicate shared futures + Query::new(self.network.clone(), hash, closest_nodes, max_k) + .find_value() + .await } } @@ -168,11 +181,11 @@ impl Service> for DhtService { fn on_query(&self, req: InboundServiceRequest) -> Self::OnQueryFuture { let response = crate::match_tl_request!(req.body, { - dht::rpc::FindNode as r => { + dht::rpc::FindNode as ref r => { let res = self.0.handle_find_node(r); Some(tl_proto::serialize(res)) }, - dht::rpc::FindValue as r => { + dht::rpc::FindValue as ref r => { let res = self.0.handle_find_value(r); Some(tl_proto::serialize(res)) }, @@ -197,7 +210,7 @@ impl Service> for DhtService { #[inline] fn on_message(&self, req: InboundServiceRequest) -> Self::OnMessageFuture { crate::match_tl_request!(req.body, { - dht::rpc::Store as r => match self.0.handle_store(r) { + dht::rpc::Store as ref r => match self.0.handle_store(r) { Ok(_) => {}, Err(e) => { tracing::debug!( @@ -300,11 +313,11 @@ impl DhtInner { todo!() } - fn handle_store(&self, req: dht::rpc::Store) -> Result { + fn handle_store(&self, req: &dht::rpc::Store) -> Result { self.storage.insert(&req.value) } - fn handle_find_node(&self, req: dht::rpc::FindNode) -> NodeResponseRaw { + fn handle_find_node(&self, req: &dht::rpc::FindNode) -> NodeResponseRaw { let nodes = self .routing_table .lock() @@ -314,18 +327,17 @@ impl DhtInner { NodeResponseRaw { nodes } } - fn handle_find_value(&self, req: dht::rpc::FindValue) -> ValueResponseRaw { - match self.storage.get(&req.key) { - Some(value) => ValueResponseRaw::Found(value), - None => { - let nodes = self - .routing_table - .lock() - .unwrap() - .closest(&req.key, req.k as usize); - - ValueResponseRaw::NotFound(nodes.into_iter().map(|node| node.into()).collect()) - } + fn handle_find_value(&self, req: &dht::rpc::FindValue) -> ValueResponseRaw { + if let Some(value) = self.storage.get(&req.key) { + ValueResponseRaw::Found(value) + } else { + let nodes = self + .routing_table + .lock() + .unwrap() + .closest(&req.key, req.k as usize); + + ValueResponseRaw::NotFound(nodes) } } diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index a11f5b864..69a85b40f 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -2,13 +2,13 @@ use std::collections::hash_map; use std::sync::Arc; use std::time::Duration; -use ahash::HashMapExt; +use ahash::{HashMapExt, HashSetExt}; use anyhow::Result; use bytes::Bytes; use futures_util::stream::FuturesUnordered; use futures_util::StreamExt; use tycho_util::time::now_sec; -use tycho_util::FastHashMap; +use tycho_util::{FastHashMap, FastHashSet}; use crate::dht::routing::RoutingTable; use crate::network::Network; @@ -16,21 +16,21 @@ use crate::proto::dht; use crate::types::{PeerId, Request}; use crate::util::NetworkExt; -pub struct FindNodesQuery { +pub struct Query { network: Network, candidates: RoutingTable, max_k: usize, } -impl FindNodesQuery { +impl Query { pub fn new( network: Network, - target: &PeerId, - nodes: Vec>, + target_id: &[u8; 32], + closest_nodes: Vec>, max_k: usize, ) -> Self { - let mut candidates = RoutingTable::new(*target); - for node in nodes { + let mut candidates = RoutingTable::new(PeerId(*target_id)); + for node in closest_nodes { candidates.add(node, max_k, &Duration::MAX); } @@ -41,18 +41,90 @@ impl 
FindNodesQuery { } } - pub async fn run(mut self) -> FastHashMap> { + fn local_id(&self) -> &[u8; 32] { + self.candidates.local_id().as_bytes() + } + + pub async fn find_value(mut self) -> Option>> { + // Prepare shared request + let request_body = Bytes::from(tl_proto::serialize(dht::rpc::FindValue { + key: *self.local_id(), + k: self.max_k as u32, + })); + + // Prepare request to initial candidates + let mut futures = FuturesUnordered::new(); + self.candidates + .visit_closest(self.local_id(), self.max_k, |node| { + futures.push(Self::visit::( + self.network.clone(), + node.clone(), + request_body.clone(), + )); + }); + + // Process responses and refill futures until the value is found or all peers are traversed + let mut visited = FastHashSet::new(); + while let Some((node, res)) = futures.next().await { + match res { + // Return the value if found + Some(Ok(dht::ValueResponse::Found(value))) => { + if !validate_value(now_sec(), self.local_id(), &value) { + // Ignore invalid values + continue; + } + + return Some(Ok(value)); + } + // Refill futures from the nodes response + Some(Ok(dht::ValueResponse::NotFound(nodes))) => { + tracing::debug!(peer_id = %node.id, count = nodes.len(), "received nodes"); + if !self.update_candidates(now_sec(), self.max_k, nodes, &mut visited) { + // Do nothing if candidates were not changed + continue; + } + + // Add new nodes from the closest range + self.candidates + .visit_closest(self.local_id(), self.max_k, |node| { + if visited.contains(&node.id) { + // Skip already visited nodes + return; + } + futures.push(Self::visit::( + self.network.clone(), + node.clone(), + request_body.clone(), + )); + }); + } + // Do nothing on error + Some(Err(e)) => { + tracing::warn!(peer_id = %node.id, "failed to query nodes: {e:?}"); + } + // Do nothing on timeout + None => { + tracing::warn!(peer_id = %node.id, "failed to query nodes: timeout"); + } + } + } + + // Done + None + } + + pub async fn find_peers(mut self) -> FastHashMap> { // Prepare shared request let request_body = Bytes::from(tl_proto::serialize(dht::rpc::FindNode { + key: *self.local_id(), k: self.max_k as u32, - key: self.candidates.local_id().to_bytes(), })); // Prepare request to initial candidates let mut futures = FuturesUnordered::new(); self.candidates - .visit_closest(self.candidates.local_id().as_bytes(), self.max_k, |node| { - futures.push(Self::visit( + .visit_closest(self.local_id(), self.max_k, |node| { + futures.push(Self::visit::( self.network.clone(), node.clone(), request_body.clone(), @@ -64,29 +136,26 @@ impl FindNodesQuery { while let Some((node, res)) = futures.next().await { match res { // Refill futures from the nodes response - Some(Ok(nodes)) => { + Some(Ok(dht::NodeResponse { nodes })) => { tracing::debug!(peer_id = %node.id, count = nodes.len(), "received nodes"); - if !self.update_candidates(now_sec(), self.max_k, nodes, &mut result) { + if !self.update_candidates_full(now_sec(), self.max_k, nodes, &mut result) { // Do nothing if candidates were not changed continue; } // Add new nodes from the closest range - self.candidates.visit_closest( - self.candidates.local_id().as_bytes(), - self.max_k, - |node| { + self.candidates + .visit_closest(self.local_id(), self.max_k, |node| { if result.contains_key(&node.id) { // Skip already visited nodes return; } - futures.push(Self::visit( + futures.push(Self::visit::( self.network.clone(), node.clone(), request_body.clone(), )); - }, - ); + }); } // Do nothing on error Some(Err(e)) => { @@ -108,7 +177,31 @@ impl FindNodesQuery { now: 
u32, max_k: usize, nodes: Vec, - result: &mut FastHashMap>, + visited: &mut FastHashSet, + ) -> bool { + let mut has_new = false; + for node in nodes { + // Skip invalid entries + if !validate_node_info(now, &node) { + continue; + } + + // Insert a new entry + if visited.insert(node.id) { + self.candidates.add(Arc::new(node), max_k, &Duration::MAX); + has_new = true; + } + } + + has_new + } + + fn update_candidates_full( + &mut self, + now: u32, + max_k: usize, + nodes: Vec, + visited: &mut FastHashMap>, ) -> bool { let mut has_new = false; for node in nodes { @@ -117,7 +210,7 @@ impl FindNodesQuery { continue; } - match result.entry(node.id) { + match visited.entry(node.id) { // Insert a new entry hash_map::Entry::Vacant(entry) => { let node = entry.insert(Arc::new(node)).clone(); @@ -136,11 +229,14 @@ impl FindNodesQuery { has_new } - async fn visit( + async fn visit( network: Network, node: Arc, request_body: Bytes, - ) -> (Arc, Option>) { + ) -> (Arc, Option>) + where + for<'a> T: tl_proto::TlRead<'a, Repr = tl_proto::Boxed>, + { let req = network.query( &node.id, Request { @@ -150,11 +246,9 @@ impl FindNodesQuery { ); let res = match tokio::time::timeout(REQUEST_TIMEOUT, req).await { - Ok(res) => Some(res.and_then(|res| { - tl_proto::deserialize::(&res.body) - .map_err(Into::into) - .map(|res| res.nodes) - })), + Ok(res) => { + Some(res.and_then(|res| tl_proto::deserialize::(&res.body).map_err(Into::into))) + } Err(_) => None, }; @@ -163,26 +257,36 @@ impl FindNodesQuery { } fn validate_node_info(now: u32, info: &dht::NodeInfo) -> bool { - const CLOCK_THRESHOLD: u32 = 1; - if info.created_at > now + CLOCK_THRESHOLD - || info.address_list.created_at > now + CLOCK_THRESHOLD - || info.address_list.expires_at >= now - || info.address_list.items.is_empty() - { - return false; + info.created_at <= now + CLOCK_THRESHOLD + && info.address_list.created_at <= now + CLOCK_THRESHOLD + && info.address_list.expires_at < now + && !info.address_list.items.is_empty() + && validate_signature(&info.id, &info.signature, info) +} + +fn validate_value(now: u32, key: &[u8; 32], value: &dht::Value) -> bool { + match value { + dht::Value::Signed(value) => { + value.expires_at < now + && key == &tl_proto::hash(&value.key) + && validate_signature(&value.key.peer_id, &value.signature, value) + } + dht::Value::Overlay(value) => value.expires_at < now && key == &tl_proto::hash(&value.key), } +} - let Some(pubkey) = info.id.as_public_key() else { +fn validate_signature(peed_id: &PeerId, signature: &Bytes, data: &T) -> bool +where + T: tl_proto::TlWrite, +{ + let Some(pubkey) = peed_id.as_public_key() else { return false; }; - - let Ok::<&[u8; 64], _>(signature) = info.signature.as_ref().try_into() else { + let Ok::<&[u8; 64], _>(signature) = signature.as_ref().try_into() else { return false; }; - - pubkey.verify(info, signature) + pubkey.verify(data, signature) } const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); - -type Nodes = Vec; +const CLOCK_THRESHOLD: u32 = 1; diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index b6b96defd..d380b05dc 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -39,7 +39,7 @@ impl RoutingTable { self.buckets .entry(distance) .or_insert_with(|| Bucket::with_capacity(max_k)) - .insert(node, max_k, &node_ttl) + .insert(node, max_k, node_ttl) } pub fn remove(&mut self, key: &PeerId) -> bool { diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index 6795cb500..e0dc3fc7f 100644 --- a/network/src/dht/storage.rs +++ 
b/network/src/dht/storage.rs @@ -126,7 +126,7 @@ impl Storage { pub fn get(&self, key: &[u8; 32]) -> Option { let stored_value = self.cache.get(key)?; - (stored_value.expires_at > now_sec()).then(|| stored_value.data) + (stored_value.expires_at > now_sec()).then_some(stored_value.data) } pub fn insert(&self, value: &proto::dht::Value) -> Result { diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs index 240561578..060c5c06f 100644 --- a/network/src/network/request_handler.rs +++ b/network/src/network/request_handler.rs @@ -89,7 +89,7 @@ impl InboundRequestHandler { metadata, body: datagram, }) - .await + .await; } }); }, diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index ffa4eff42..4a10ce89a 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -21,6 +21,8 @@ pub trait WithValue: TlWrite + for<'a> TlRead<'a, Repr = tl_proto::Boxed> { type Value<'a>: TlWrite + TlRead<'a, Repr = tl_proto::Boxed>; + + fn parse_value(value: Box) -> tl_proto::TlResult>; } /// Key for values that can only be updated by the owner. @@ -37,6 +39,13 @@ pub struct SignedKey { impl WithValue for SignedKey { type Value<'a> = SignedValue; + + fn parse_value(value: Box) -> tl_proto::TlResult> { + match *value { + Value::Signed(value) => Ok(value), + Value::Overlay(_) => Err(tl_proto::TlError::UnknownConstructor), + } + } } /// Key for overlay-managed values. @@ -53,6 +62,13 @@ pub struct OverlayKey { impl WithValue for OverlayKey { type Value<'a> = OverlayValue; + + fn parse_value(value: Box) -> tl_proto::TlResult> { + match *value { + Value::Signed(_) => Err(tl_proto::TlError::UnknownConstructor), + Value::Overlay(value) => Ok(value), + } + } } /// Value with a known owner. @@ -163,7 +179,7 @@ pub struct NodeResponse { pub enum ValueResponse { /// An existing value for the specified key. #[tl(id = "dht.valueFound")] - Found(Value), + Found(Box), /// List of nodes closest to the key. 
#[tl(id = "dht.valueNotFound")] NotFound(Vec), From d17129525e3227ae24ef882cd2ebb726aa91e2bb Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 12 Feb 2024 19:30:22 +0100 Subject: [PATCH 19/35] network: Update dht nodes in background --- Cargo.lock | 2 + network/src/dht/mod.rs | 116 ++++++++++++++++++++++++++------------- network/src/dht/query.rs | 12 ++-- util/Cargo.toml | 2 + util/src/time.rs | 9 +++ 5 files changed, 97 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a89b3ba60..bf76e922b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1861,6 +1861,8 @@ version = "0.0.1" dependencies = [ "ahash", "dashmap", + "rand", + "tokio", ] [[package]] diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index a664dbdb9..1d7e87970 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -4,7 +4,7 @@ use std::time::Duration; use anyhow::Result; use bytes::{Buf, Bytes}; use tl_proto::TlWrite; -use tycho_util::time::now_sec; +use tycho_util::time::{now_sec, shifted_interval}; use self::query::Query; use self::routing::RoutingTable; @@ -71,38 +71,16 @@ impl DhtClient { .map_err(Into::into) } - pub async fn find_peers(&self, target_id: &[u8; 32]) -> Result>> { - let max_k = self.inner.max_k; - let closest_nodes = { - let routing_table = self.inner.routing_table.lock().unwrap(); - routing_table.closest(target_id, max_k) - }; - // TODO: deduplicate shared futures - let nodes = Query::new(self.network.clone(), target_id, closest_nodes, max_k) - .find_peers() - .await; - Ok(nodes.into_values().collect()) - } - pub async fn find_value(&self, key: &T) -> Option>> where T: dht::WithValue, { - let res = self.find_value_impl(&tl_proto::hash(key)).await?; + let res = self + .inner + .find_value(&self.network, &tl_proto::hash(key)) + .await?; Some(res.and_then(|value| T::parse_value(value).map_err(Into::into))) } - - async fn find_value_impl(&self, hash: &[u8; 32]) -> Option>> { - let max_k = self.inner.max_k; - let closest_nodes = { - let routing_table = self.inner.routing_table.lock().unwrap(); - routing_table.closest(hash, max_k) - }; - // TODO: deduplicate shared futures - Query::new(self.network.clone(), hash, closest_nodes, max_k) - .find_value() - .await - } } pub struct DhtServiceBuilder { @@ -263,26 +241,53 @@ struct DhtInner { impl DhtInner { fn start_background_tasks(self: &Arc, network: WeakNetwork) { const INFO_TTL: u32 = 3600; - const INFO_UPDATE_INTERVAL: Duration = Duration::from_secs(60); - const ANNOUNCE_EVERY_N_STEPS: usize = 10; + const INFO_UPDATE_PERIOD: Duration = Duration::from_secs(60); + + const ANNOUNCE_PERIOD: Duration = Duration::from_secs(600); + const ANNOUNCE_SHIFT: Duration = Duration::from_secs(60); + const POPULATE_PERIOD: Duration = Duration::from_secs(60); + const POPULATE_SHIFT: Duration = Duration::from_secs(10); + + enum Action { + Refresh, + Announce, + Populate, + } let this = Arc::downgrade(self); tokio::spawn(async move { tracing::debug!("background DHT loop started"); - let mut interval = tokio::time::interval(INFO_UPDATE_INTERVAL); - let mut step = 0; + let mut refresh_interval = tokio::time::interval(INFO_UPDATE_PERIOD); + let mut announce_interval = shifted_interval(ANNOUNCE_PERIOD, ANNOUNCE_SHIFT); + let mut populate_interval = shifted_interval(POPULATE_PERIOD, POPULATE_SHIFT); + loop { - interval.tick().await; + let action = tokio::select! 
{ + _ = refresh_interval.tick() => Action::Refresh, + _ = announce_interval.tick() => Action::Announce, + _ = populate_interval.tick() => Action::Populate, + }; + let (Some(this), Some(network)) = (this.upgrade(), network.upgrade()) else { break; }; - this.refresh_local_node_info(&network, INFO_TTL); - - step = (step + 1) % ANNOUNCE_EVERY_N_STEPS; - if step == 0 { - if let Err(e) = this.announce_local_node_info(&network).await { - tracing::error!("failed to announce local DHT node info: {e:?}"); + match action { + Action::Refresh => { + this.refresh_local_node_info(&network, INFO_TTL); + } + Action::Announce => { + // Always refresh node info before announcing + this.refresh_local_node_info(&network, INFO_TTL); + refresh_interval.reset(); + + if let Err(e) = this.announce_local_node_info(&network).await { + tracing::error!("failed to announce local DHT node info: {e:?}"); + } + } + Action::Populate => { + // TODO: spawn and await in the background? + this.find_more_dht_nodes(&network).await; } } } @@ -313,6 +318,41 @@ impl DhtInner { todo!() } + async fn find_more_dht_nodes(self: &Arc, network: &Network) { + // TODO: deduplicate shared futures + let query = Query::new( + network.clone(), + &self.routing_table.lock().unwrap(), + self.local_id.as_bytes(), + self.max_k, + ); + + // NOTE: expression is intentionally split to drop the routing table guard + let peers = query.find_peers().await; + + let mut routing_table = self.routing_table.lock().unwrap(); + for peer in peers { + routing_table.add(peer, self.max_k, &self.node_ttl); + } + } + + async fn find_value( + &self, + network: &Network, + key_hash: &[u8; 32], + ) -> Option>> { + // TODO: deduplicate shared futures + let query = Query::new( + network.clone(), + &self.routing_table.lock().unwrap(), + key_hash, + self.max_k, + ); + + // NOTE: expression is intentionally split to drop the routing table guard + query.find_value().await + } + fn handle_store(&self, req: &dht::rpc::Store) -> Result { self.storage.insert(&req.value) } diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index 69a85b40f..804177443 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -25,14 +25,14 @@ pub struct Query { impl Query { pub fn new( network: Network, + routing_table: &RoutingTable, target_id: &[u8; 32], - closest_nodes: Vec>, max_k: usize, ) -> Self { let mut candidates = RoutingTable::new(PeerId(*target_id)); - for node in closest_nodes { - candidates.add(node, max_k, &Duration::MAX); - } + routing_table.visit_closest(target_id, max_k, |node| { + candidates.add(node.clone(), max_k, &Duration::MAX); + }); Self { network, @@ -113,7 +113,7 @@ impl Query { None } - pub async fn find_peers(mut self) -> FastHashMap> { + pub async fn find_peers(mut self) -> impl Iterator> { // Prepare shared request let request_body = Bytes::from(tl_proto::serialize(dht::rpc::FindNode { key: *self.local_id(), @@ -169,7 +169,7 @@ impl Query { } // Done - result + result.into_values() } fn update_candidates( diff --git a/util/Cargo.toml b/util/Cargo.toml index 8e0b0f498..8e8bc5b84 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -8,6 +8,8 @@ description = "Shared utilities for node components." 
# crates.io deps ahash = "0.8" dashmap = "5.4" +rand = "0.8" +tokio = { version = "1", default-features = false, features = ["time"] } # local deps diff --git a/util/src/time.rs b/util/src/time.rs index ec8812309..985712a76 100644 --- a/util/src/time.rs +++ b/util/src/time.rs @@ -1,6 +1,15 @@ +use std::time::Duration; + +use rand::Rng; + pub fn now_sec() -> u32 { std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap() .as_secs() as u32 } + +pub fn shifted_interval(period: Duration, max_shift: Duration) -> tokio::time::Interval { + let shift = rand::thread_rng().gen_range(Duration::ZERO..max_shift); + tokio::time::interval_at(tokio::time::Instant::now() + shift, period + shift) +} From a25700f75ec9bd4e3348ded0fa6af2a16864ce8e Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Tue, 13 Feb 2024 14:46:08 +0100 Subject: [PATCH 20/35] network: Impl dht value store --- network/src/connection.rs | 22 +++++--- network/src/dht/mod.rs | 21 ++++++- network/src/dht/query.rs | 93 +++++++++++++++++++++++++++++- network/src/network/peer.rs | 15 +++++ network/src/proto/dht.rs | 2 +- network/src/util/traits.rs | 110 +++++++++++++++++++++++++----------- 6 files changed, 217 insertions(+), 46 deletions(-) diff --git a/network/src/connection.rs b/network/src/connection.rs index 583eb7191..a40b1379c 100644 --- a/network/src/connection.rs +++ b/network/src/connection.rs @@ -5,7 +5,7 @@ use std::task::{Context, Poll}; use anyhow::{Context as _, Result}; use bytes::Bytes; -use quinn::{ConnectionError, RecvStream}; +use quinn::{ConnectionError, RecvStream, SendDatagramError}; use crate::types::{Direction, InboundRequestMeta, PeerId}; @@ -52,10 +52,6 @@ impl Connection { self.inner.close(0u8.into(), b"connection closed"); } - pub async fn open_uni(&self) -> Result { - self.inner.open_uni().await.map(SendStream) - } - pub async fn open_bi(&self) -> Result<(SendStream, RecvStream), ConnectionError> { self.inner .open_bi() @@ -63,10 +59,6 @@ impl Connection { .map(|(send, recv)| (SendStream(send), recv)) } - pub async fn accept_uni(&self) -> Result { - self.inner.accept_uni().await - } - pub async fn accept_bi(&self) -> Result<(SendStream, RecvStream), ConnectionError> { self.inner .accept_bi() @@ -74,6 +66,18 @@ impl Connection { .map(|(send, recv)| (SendStream(send), recv)) } + pub async fn open_uni(&self) -> Result { + self.inner.open_uni().await.map(SendStream) + } + + pub async fn accept_uni(&self) -> Result { + self.inner.accept_uni().await + } + + pub fn send_datagram(&self, data: Bytes) -> Result<(), SendDatagramError> { + self.inner.send_datagram(data) + } + pub async fn read_datagram(&self) -> Result { self.inner.read_datagram().await } diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 1d7e87970..8ba51ce06 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -6,7 +6,7 @@ use bytes::{Buf, Bytes}; use tl_proto::TlWrite; use tycho_util::time::{now_sec, shifted_interval}; -use self::query::Query; +use self::query::{Query, StoreValue}; use self::routing::RoutingTable; use self::storage::{Storage, StorageError}; use crate::network::{Network, WeakNetwork}; @@ -81,6 +81,10 @@ impl DhtClient { .await?; Some(res.and_then(|value| T::parse_value(value).map_err(Into::into))) } + + pub async fn store_value(&self, value: Box) -> Result<()> { + self.inner.store_value(&self.network, value).await + } } pub struct DhtServiceBuilder { @@ -353,6 +357,21 @@ impl DhtInner { query.find_value().await } + async fn store_value(&self, network: &Network, value: Box) -> Result<()> { + 
self.storage.insert(&value)?; + + let query = StoreValue::new( + network.clone(), + &self.routing_table.lock().unwrap(), + value, + self.max_k, + ); + + // NOTE: expression is intentionally split to drop the routing table guard + query.run().await; + Ok(()) + } + fn handle_store(&self, req: &dht::rpc::Store) -> Result { self.storage.insert(&req.value) } diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index 804177443..e30422cc3 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -6,7 +6,8 @@ use ahash::{HashMapExt, HashSetExt}; use anyhow::Result; use bytes::Bytes; use futures_util::stream::FuturesUnordered; -use futures_util::StreamExt; +use futures_util::{Future, StreamExt}; +use tokio::sync::Semaphore; use tycho_util::time::now_sec; use tycho_util::{FastHashMap, FastHashSet}; @@ -53,6 +54,7 @@ impl Query { })); // Prepare request to initial candidates + let semaphore = Semaphore::new(MAX_PARALLEL_REQUESTS); let mut futures = FuturesUnordered::new(); self.candidates .visit_closest(self.local_id(), self.max_k, |node| { @@ -60,6 +62,7 @@ impl Query { self.network.clone(), node.clone(), request_body.clone(), + &semaphore, )); }); @@ -95,6 +98,7 @@ impl Query { self.network.clone(), node.clone(), request_body.clone(), + &semaphore, )); }); } @@ -121,6 +125,7 @@ impl Query { })); // Prepare request to initial candidates + let semaphore = Semaphore::new(MAX_PARALLEL_REQUESTS); let mut futures = FuturesUnordered::new(); self.candidates .visit_closest(self.local_id(), self.max_k, |node| { @@ -128,6 +133,7 @@ impl Query { self.network.clone(), node.clone(), request_body.clone(), + &semaphore, )); }); @@ -154,6 +160,7 @@ impl Query { self.network.clone(), node.clone(), request_body.clone(), + &semaphore, )); }); } @@ -233,10 +240,15 @@ impl Query { network: Network, node: Arc, request_body: Bytes, + semaphore: &Semaphore, ) -> (Arc, Option>) where for<'a> T: tl_proto::TlRead<'a, Repr = tl_proto::Boxed>, { + let Ok(_permit) = semaphore.acquire().await else { + return (node, None); + }; + let req = network.query( &node.id, Request { @@ -256,6 +268,84 @@ impl Query { } } +pub struct StoreValue { + futures: FuturesUnordered, +} + +impl StoreValue<()> { + pub fn new( + network: Network, + routing_table: &RoutingTable, + value: Box, + max_k: usize, + ) -> StoreValue, Option>)> + Send> { + let key_hash = match value.as_ref() { + dht::Value::Signed(value) => tl_proto::hash(&value.key), + dht::Value::Overlay(value) => tl_proto::hash(&value.key), + }; + + let request_body = Bytes::from(tl_proto::serialize(dht::rpc::Store { value })); + + let semaphore = Arc::new(Semaphore::new(10)); + let futures = futures_util::stream::FuturesUnordered::new(); + routing_table.visit_closest(&key_hash, max_k, |node| { + futures.push(Self::visit( + network.clone(), + node.clone(), + request_body.clone(), + semaphore.clone(), + )); + }); + + StoreValue { futures } + } + + async fn visit( + network: Network, + node: Arc, + request_body: Bytes, + semaphore: Arc, + ) -> (Arc, Option>) { + let Ok(_permit) = semaphore.acquire().await else { + return (node, None); + }; + + let req = network.send( + &node.id, + Request { + version: Default::default(), + body: request_body.clone(), + }, + ); + + let res = match tokio::time::timeout(REQUEST_TIMEOUT, req).await { + Ok(res) => Some(res), + Err(_) => None, + }; + + (node, res) + } +} + +impl, Option>)> + Send> StoreValue { + pub async fn run(mut self) { + while let Some((node, res)) = self.futures.next().await { + match res { + Some(Ok(())) => { + 
tracing::debug!(peer_id = %node.id, "value stored"); + } + Some(Err(e)) => { + tracing::warn!(peer_id = %node.id, "failed to store value: {e:?}"); + } + // Do nothing on timeout + None => { + tracing::warn!(peer_id = %node.id, "failed to store value: timeout"); + } + } + } + } +} + fn validate_node_info(now: u32, info: &dht::NodeInfo) -> bool { info.created_at <= now + CLOCK_THRESHOLD && info.address_list.created_at <= now + CLOCK_THRESHOLD @@ -290,3 +380,4 @@ where const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); const CLOCK_THRESHOLD: u32 = 1; +const MAX_PARALLEL_REQUESTS: usize = 10; diff --git a/network/src/network/peer.rs b/network/src/network/peer.rs index 0bf269250..1718fa37a 100644 --- a/network/src/network/peer.rs +++ b/network/src/network/peer.rs @@ -34,4 +34,19 @@ impl Peer { recv_response(&mut recv_stream).await } + + pub async fn send_message(&self, request: Request) -> Result<()> { + let send_stream = self.connection.open_uni().await?; + let mut send_stream = FramedWrite::new(send_stream, make_codec(&self.config)); + + send_request(&mut send_stream, request).await?; + send_stream.get_mut().finish().await?; + + Ok(()) + } + + pub fn send_datagram(&self, request: Bytes) -> Result<()> { + self.connection.send_datagram(request)?; + Ok(()) + } } diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index 4a10ce89a..4896ceff7 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -206,7 +206,7 @@ pub mod rpc { #[tl(boxed, id = "dht.store", scheme = "proto.tl")] pub struct Store { /// A value to store. - pub value: Value, + pub value: Box, } /// Search for `k` closest nodes. diff --git a/network/src/util/traits.rs b/network/src/util/traits.rs index c716dd069..f031eae31 100644 --- a/network/src/util/traits.rs +++ b/network/src/util/traits.rs @@ -3,7 +3,7 @@ use std::future::Future; use anyhow::Result; use bytes::Bytes; -use crate::network::Network; +use crate::network::{Network, Peer}; use crate::types::{PeerEvent, PeerId, Request, Response}; pub trait NetworkExt { @@ -12,51 +12,93 @@ pub trait NetworkExt { peer_id: &PeerId, request: Request, ) -> impl Future>> + Send; + + fn send( + &self, + peer_id: &PeerId, + request: Request, + ) -> impl Future> + Send; } impl NetworkExt for Network { async fn query(&self, peer_id: &PeerId, request: Request) -> Result> { - use tokio::sync::broadcast::error::RecvError; + on_connected_peer(self, Peer::rpc, peer_id, request).await + } + + async fn send(&self, peer_id: &PeerId, request: Request) -> Result<()> { + on_connected_peer(self, Peer::send_message, peer_id, request).await + } +} - let mut peer_events = self.subscribe()?; +async fn on_connected_peer( + network: &Network, + f: F, + peer_id: &PeerId, + request: Request, +) -> Result +where + for<'a> F: PeerTask<'a, T>, +{ + use tokio::sync::broadcast::error::RecvError; - // Make query if already connected - if let Some(peer) = self.peer(peer_id) { - return peer.rpc(request).await; - } + let mut peer_events = network.subscribe()?; - match self.known_peers().get(peer_id) { - // Initiate a connection of it is a known peer - Some(peer_info) => { - self.connect_with_peer_id(peer_info.address, peer_id) - .await?; - } - // Error otherwise - None => anyhow::bail!("trying to query an unknown peer: {peer_id}"), + // Interact if already connected + if let Some(peer) = network.peer(peer_id) { + return f.call(&peer, request).await; + } + + match network.known_peers().get(peer_id) { + // Initiate a connection of it is a known peer + Some(peer_info) => { + network + 
.connect_with_peer_id(peer_info.address, peer_id) + .await?; } + // Error otherwise + None => anyhow::bail!("trying to interact with an unknown peer: {peer_id}"), + } - loop { - match peer_events.recv().await { - Ok(PeerEvent::NewPeer(new_peer_id)) if &new_peer_id == peer_id => { - if let Some(peer) = self.peer(peer_id) { - return peer.rpc(request).await; - } - } - Ok(_) => {} - Err(RecvError::Closed) => anyhow::bail!("network subscription closed"), - Err(RecvError::Lagged(_)) => { - peer_events = peer_events.resubscribe(); - - if let Some(peer) = self.peer(peer_id) { - return peer.rpc(request).await; - } + loop { + match peer_events.recv().await { + Ok(PeerEvent::NewPeer(new_peer_id)) if &new_peer_id == peer_id => { + if let Some(peer) = network.peer(peer_id) { + return f.call(&peer, request).await; } } + Ok(_) => {} + Err(RecvError::Closed) => anyhow::bail!("network subscription closed"), + Err(RecvError::Lagged(_)) => { + peer_events = peer_events.resubscribe(); - anyhow::ensure!( - self.known_peers().contains(peer_id), - "waiting for a connection to an unknown peer: {peer_id}", - ); + if let Some(peer) = network.peer(peer_id) { + return f.call(&peer, request).await; + } + } } + + anyhow::ensure!( + network.known_peers().contains(peer_id), + "waiting for a connection to an unknown peer: {peer_id}", + ); + } +} + +trait PeerTask<'a, T> { + type Output: Future> + 'a; + + fn call(self, peer: &'a Peer, request: Request) -> Self::Output; +} + +impl<'a, T, F, Fut> PeerTask<'a, T> for F +where + F: FnOnce(&'a Peer, Request) -> Fut, + Fut: Future> + 'a, +{ + type Output = Fut; + + #[inline] + fn call(self, peer: &'a Peer, request: Request) -> Fut { + self(peer, request) } } From 9c81e0fbb885064cd6922f1b9d4b78a8315fac64 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Tue, 13 Feb 2024 17:33:07 +0100 Subject: [PATCH 21/35] network: Fix dht bootstrap --- Cargo.lock | 1 + network/Cargo.toml | 1 + network/src/dht/mod.rs | 135 +++++++++++++++++----- network/src/dht/query.rs | 34 +----- network/src/network/connection_manager.rs | 7 +- network/src/types/address.rs | 2 +- network/src/types/mod.rs | 2 +- network/src/util/mod.rs | 17 +++ network/tests/dht.rs | 99 ++++++++++++++++ 9 files changed, 234 insertions(+), 64 deletions(-) create mode 100644 network/tests/dht.rs diff --git a/Cargo.lock b/Cargo.lock index bf76e922b..e8de21899 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1843,6 +1843,7 @@ dependencies = [ "tokio", "tokio-util", "tracing", + "tracing-subscriber", "tracing-test", "tycho-util", "x509-parser", diff --git a/network/Cargo.toml b/network/Cargo.toml index 08ecc3982..6ad3b31a9 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -42,6 +42,7 @@ tycho-util = { path = "../util", version = "=0.0.1" } [dev-dependencies] tokio = { version = "1", features = ["rt-multi-thread"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-test = "0.2" [lints] diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 8ba51ce06..ec47c7aa7 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -11,8 +11,10 @@ use self::routing::RoutingTable; use self::storage::{Storage, StorageError}; use crate::network::{Network, WeakNetwork}; use crate::proto::dht; -use crate::types::{AddressList, InboundServiceRequest, PeerId, Request, Response, Service}; -use crate::util::{NetworkExt, Routable}; +use crate::types::{ + AddressList, InboundServiceRequest, PeerAffinity, PeerId, PeerInfo, Request, Response, Service, +}; +use crate::util::{validate_signature, NetworkExt, 
Routable}; pub use self::storage::StorageBuilder; @@ -55,20 +57,17 @@ impl DhtClient { &self.network } - pub fn add_peer(&self, peer: Arc) { - self.inner - .routing_table - .lock() - .unwrap() - .add(peer, self.inner.max_k, &self.inner.node_ttl); + pub fn add_peer(&self, peer: Arc) -> Result { + self.inner.add_peer(&self.network, peer) } pub async fn get_node_info(&self, peer_id: &PeerId) -> Result { - self.network + let res = self + .network .query(peer_id, Request::from_tl(dht::rpc::GetNodeInfo)) - .await? - .parse_tl() - .map_err(Into::into) + .await?; + let dht::NodeInfoResponse { info } = res.parse_tl()?; + Ok(info) } pub async fn find_value(&self, key: &T) -> Option>> @@ -114,16 +113,6 @@ impl DhtServiceBuilder { (client_builder, DhtService(inner)) } - - pub fn with_max_k(mut self, max_k: usize) -> Self { - self.max_k = max_k; - self - } - - pub fn with_node_ttl(mut self, ttl: Duration) -> Self { - self.node_ttl = ttl; - self - } } impl DhtServiceBuilder<()> { @@ -141,6 +130,18 @@ impl DhtServiceBuilder<()> { } } +impl DhtServiceBuilder { + pub fn with_max_k(mut self, max_k: usize) -> Self { + self.max_k = max_k; + self + } + + pub fn with_node_ttl(mut self, ttl: Duration) -> Self { + self.node_ttl = ttl; + self + } +} + #[derive(Clone)] pub struct DhtService(Arc); @@ -162,6 +163,13 @@ impl Service> for DhtService { type OnDatagramFuture = futures_util::future::Ready<()>; fn on_query(&self, req: InboundServiceRequest) -> Self::OnQueryFuture { + tracing::debug!( + peer_id = %req.metadata.peer_id, + addr = %req.metadata.remote_address, + byte_len = req.body.len(), + "processing DHT query", + ); + let response = crate::match_tl_request!(req.body, { dht::rpc::FindNode as ref r => { let res = self.0.handle_find_node(r); @@ -191,6 +199,13 @@ impl Service> for DhtService { #[inline] fn on_message(&self, req: InboundServiceRequest) -> Self::OnMessageFuture { + tracing::debug!( + peer_id = %req.metadata.peer_id, + addr = %req.metadata.remote_address, + byte_len = req.body.len(), + "processing DHT message", + ); + crate::match_tl_request!(req.body, { dht::rpc::Store as ref r => match self.0.handle_store(r) { Ok(_) => {}, @@ -285,7 +300,7 @@ impl DhtInner { this.refresh_local_node_info(&network, INFO_TTL); refresh_interval.reset(); - if let Err(e) = this.announce_local_node_info(&network).await { + if let Err(e) = this.announce_local_node_info(&network, INFO_TTL).await { tracing::error!("failed to announce local DHT node info: {e:?}"); } } @@ -311,15 +326,34 @@ impl DhtInner { created_at: now, signature: Bytes::new(), }; - let signature = network.sign_tl(&node_info); - node_info.signature = signature.to_vec().into(); + node_info.signature = network.sign_tl(&node_info).to_vec().into(); *self.node_info.lock().unwrap() = Some(node_info); } - async fn announce_local_node_info(self: &Arc, _network: &Network) -> Result<()> { - // TODO: store node info in the DHT - todo!() + async fn announce_local_node_info(self: &Arc, network: &Network, ttl: u32) -> Result<()> { + let value = { + let now = now_sec(); + + let mut value = dht::SignedValue { + key: dht::SignedKey { + name: "addr".to_owned().into(), + idx: 0, + peer_id: self.local_id, + }, + data: Bytes::from(tl_proto::serialize(AddressList { + items: vec![network.local_addr().into()], + created_at: now, + expires_at: now + ttl, + })), + expires_at: now_sec() + ttl, + signature: Bytes::new(), + }; + value.signature = network.sign_tl(&value).to_vec().into(); + Box::new(dht::Value::Signed(value)) + }; + + self.store_value(network, value).await } async fn 
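The flip side of the announcement above, as an illustrative sketch rather than code from the patch: once a peer has stored its signed "addr" value, any node can resolve that peer's addresses with the typed `find_value` API from the earlier patches by rebuilding the same key. The helper name `resolve_peer_addresses` is hypothetical; the key layout and the fact that `data` holds a TL-serialized `AddressList` follow from `announce_local_node_info` above.

async fn resolve_peer_addresses(
    dht_client: &DhtClient,
    peer_id: PeerId,
) -> Option<Result<AddressList>> {
    // Must match the key signed and stored by `announce_local_node_info`.
    let key = dht::SignedKey {
        name: "addr".to_owned().into(),
        idx: 0,
        peer_id,
    };

    // `find_value` runs the iterative lookup and validates the value's
    // signature and expiration before returning it.
    let value: dht::SignedValue = match dht_client.find_value(&key).await? {
        Ok(value) => value,
        Err(e) => return Some(Err(e)),
    };

    // The announcer serialized an `AddressList` into `data`.
    Some(tl_proto::deserialize::<AddressList>(&value.data).map_err(Into::into))
}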
find_more_dht_nodes(self: &Arc, network: &Network) { @@ -372,6 +406,30 @@ impl DhtInner { Ok(()) } + fn add_peer(&self, network: &Network, peer: Arc) -> Result { + anyhow::ensure!( + validate_node_info(now_sec(), &peer), + "invalid peer node info" + ); + + // TODO: add support for multiple addresses + let peer_info = match peer.address_list.items.first() { + Some(address) if peer.id != self.local_id => PeerInfo { + peer_id: peer.id, + affinity: PeerAffinity::Allowed, + address: address.clone(), + }, + _ => return Ok(false), + }; + + let mut routing_table = self.routing_table.lock().unwrap(); + let is_new = routing_table.add(peer, self.max_k, &self.node_ttl); + if is_new { + network.known_peers().insert(peer_info); + } + Ok(is_new) + } + fn handle_store(&self, req: &dht::rpc::Store) -> Result { self.storage.insert(&req.value) } @@ -409,6 +467,25 @@ impl DhtInner { } } +fn validate_node_info(now: u32, info: &dht::NodeInfo) -> bool { + info.created_at <= now + CLOCK_THRESHOLD + && info.address_list.created_at <= now + CLOCK_THRESHOLD + && info.address_list.expires_at >= now + && !info.address_list.items.is_empty() + && validate_signature(&info.id, &info.signature, info) +} + +fn validate_value(now: u32, key: &[u8; 32], value: &dht::Value) -> bool { + match value { + dht::Value::Signed(value) => { + value.expires_at >= now + && key == &tl_proto::hash(&value.key) + && validate_signature(&value.key.peer_id, &value.signature, value) + } + dht::Value::Overlay(value) => value.expires_at >= now && key == &tl_proto::hash(&value.key), + } +} + #[derive(TlWrite)] #[tl(boxed, scheme = "proto.tl")] enum ValueResponseRaw { @@ -423,3 +500,5 @@ enum ValueResponseRaw { struct NodeResponseRaw { nodes: Vec>, } + +const CLOCK_THRESHOLD: u32 = 1; diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index e30422cc3..8ffcdf194 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -12,6 +12,7 @@ use tycho_util::time::now_sec; use tycho_util::{FastHashMap, FastHashSet}; use crate::dht::routing::RoutingTable; +use crate::dht::{validate_node_info, validate_value}; use crate::network::Network; use crate::proto::dht; use crate::types::{PeerId, Request}; @@ -346,38 +347,5 @@ impl, Option>)> + Send> StoreV } } -fn validate_node_info(now: u32, info: &dht::NodeInfo) -> bool { - info.created_at <= now + CLOCK_THRESHOLD - && info.address_list.created_at <= now + CLOCK_THRESHOLD - && info.address_list.expires_at < now - && !info.address_list.items.is_empty() - && validate_signature(&info.id, &info.signature, info) -} - -fn validate_value(now: u32, key: &[u8; 32], value: &dht::Value) -> bool { - match value { - dht::Value::Signed(value) => { - value.expires_at < now - && key == &tl_proto::hash(&value.key) - && validate_signature(&value.key.peer_id, &value.signature, value) - } - dht::Value::Overlay(value) => value.expires_at < now && key == &tl_proto::hash(&value.key), - } -} - -fn validate_signature(peed_id: &PeerId, signature: &Bytes, data: &T) -> bool -where - T: tl_proto::TlWrite, -{ - let Some(pubkey) = peed_id.as_public_key() else { - return false; - }; - let Ok::<&[u8; 64], _>(signature) = signature.as_ref().try_into() else { - return false; - }; - pubkey.verify(data, signature) -} - const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); -const CLOCK_THRESHOLD: u32 = 1; const MAX_PARALLEL_REQUESTS: usize = 10; diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index e133c7ebd..f314e76d2 100644 --- 
a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -73,7 +73,6 @@ impl ConnectionManager { (connection_manager, mailbox_tx) } - #[tracing::instrument(skip_all, fields(local_id = %self.endpoint.peer_id()))] pub async fn start(mut self) { tracing::info!("connection manager started"); @@ -580,4 +579,10 @@ impl KnownPeers { pub fn remove(&self, peer_id: &PeerId) -> Option { self.0.remove(peer_id).map(|(_, value)| value) } + + pub fn print_all(&self) { + for item in self.0.iter() { + println!("{}: {:?}", item.peer_id, item.value()); + } + } } diff --git a/network/src/types/address.rs b/network/src/types/address.rs index cde43e124..157ebb273 100644 --- a/network/src/types/address.rs +++ b/network/src/types/address.rs @@ -147,7 +147,7 @@ impl<'a> TlRead<'a> for AddressList { use tl_proto::TlError; let len = u32::read_from(packet, offset)? as usize; - if len > Self::MAX_LEN { + if len == 0 || len > Self::MAX_LEN { return Err(TlError::InvalidData); } diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index db9d8f8fe..cbc17eb09 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -82,7 +82,7 @@ impl Response { pub fn parse_tl(self) -> tl_proto::TlResult where - for<'a> T: tl_proto::TlRead<'a>, + for<'a> T: tl_proto::TlRead<'a, Repr = tl_proto::Boxed>, { tl_proto::deserialize(self.body.as_ref()) } diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs index d7d1fcb1b..63f1ad51e 100644 --- a/network/src/util/mod.rs +++ b/network/src/util/mod.rs @@ -1,7 +1,11 @@ +use bytes::Bytes; + pub use self::futures::BoxFutureOrNoop; pub use self::router::{Routable, Router, RouterBuilder}; pub use self::traits::NetworkExt; +use crate::types::PeerId; + mod futures; mod router; mod traits; @@ -29,3 +33,16 @@ macro_rules! 
match_tl_request { } }; } + +pub(crate) fn validate_signature(peed_id: &PeerId, signature: &Bytes, data: &T) -> bool +where + T: tl_proto::TlWrite, +{ + let Some(public_key) = peed_id.as_public_key() else { + return false; + }; + let Ok::<&[u8; 64], _>(signature) = signature.as_ref().try_into() else { + return false; + }; + public_key.verify(data, signature) +} diff --git a/network/tests/dht.rs b/network/tests/dht.rs new file mode 100644 index 000000000..3db627d5b --- /dev/null +++ b/network/tests/dht.rs @@ -0,0 +1,99 @@ +use std::net::Ipv4Addr; +use std::sync::Arc; + +use anyhow::Result; +use everscale_crypto::ed25519; +use tycho_network::{proto, Address, AddressList, DhtClient, DhtService, Network, PeerId, Router}; +use tycho_util::time::now_sec; + +struct Node { + network: Network, + dht: DhtClient, +} + +impl Node { + fn new(key: &ed25519::SecretKey) -> Result { + let keypair = everscale_crypto::ed25519::KeyPair::from(key); + + let (dht_client, dht) = DhtService::builder(keypair.public_key.into()) + .with_storage(|builder| builder) + .build(); + + let router = Router::builder().route(dht).build(); + + let network = Network::builder() + .with_private_key(key.to_bytes()) + .with_service_name("test-service") + .build((Ipv4Addr::LOCALHOST, 0), router) + .unwrap(); + + let dht = dht_client.build(network.clone()); + + Ok(Self { network, dht }) + } + + fn make_node_info(key: &ed25519::SecretKey, address: Address) -> proto::dht::NodeInfo { + const TTL: u32 = 3600; + + let keypair = ed25519::KeyPair::from(key); + let peer_id = PeerId::from(keypair.public_key); + + let now = now_sec(); + let mut node_info = proto::dht::NodeInfo { + id: peer_id, + address_list: AddressList { + items: vec![address], + created_at: now, + expires_at: now + TTL, + }, + created_at: now, + signature: Default::default(), + }; + node_info.signature = keypair.sign(&node_info).to_vec().into(); + node_info + } +} + +fn make_network(node_count: usize) -> Vec { + let keys = (0..node_count) + .map(|i| ed25519::SecretKey::generate(&mut rand::thread_rng())) + .collect::>(); + + let nodes = keys + .iter() + .map(Node::new) + .collect::>>() + .unwrap(); + + let bootstrap_info = std::iter::zip(&keys, &nodes) + .map(|(key, node)| Arc::new(Node::make_node_info(key, node.network.local_addr().into()))) + .collect::>(); + for node in &nodes { + for info in &bootstrap_info { + node.dht.add_peer(info.clone()).unwrap(); + } + } + + nodes +} + +#[tokio::test] +async fn bootstrap_nodes_accessible() -> Result<()> { + tracing_subscriber::fmt::init(); + + let nodes = make_network(5); + + for i in 0..nodes.len() { + for j in 0..nodes.len() { + if i == j { + continue; + } + + let left = &nodes[i]; + let right = &nodes[j]; + left.dht.get_node_info(right.network.peer_id()).await?; + } + } + + Ok(()) +} From 3f35f2d949c5dd5ad8864a3daa668e90f059a606 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Tue, 13 Feb 2024 18:55:39 +0100 Subject: [PATCH 22/35] network: Refactor project structure --- Cargo.lock | 2 ++ Cargo.toml | 5 +++ consensus/src/intercom/dispatcher.rs | 6 ++-- network/Cargo.toml | 5 +-- network/src/dht/mod.rs | 18 +++++----- network/src/dht/routing.rs | 4 +++ network/src/lib.rs | 20 +++++------ network/src/{ => network}/config.rs | 14 ++++---- network/src/{ => network}/connection.rs | 42 ++++++++++++++++++++--- network/src/network/connection_manager.rs | 31 ++++++++--------- network/src/{ => network}/crypto.rs | 14 ++++---- network/src/{ => network}/endpoint.rs | 10 +++--- network/src/network/mod.rs | 32 +++++++++-------- 
network/src/network/peer.rs | 14 ++++---- network/src/network/request_handler.rs | 40 ++++++++++----------- network/src/network/wire.rs | 26 +++++++------- network/src/proto/dht.rs | 2 -- network/src/types/mod.rs | 22 ++++++------ network/src/util/mod.rs | 4 +-- network/src/util/router.rs | 2 +- network/src/util/traits.rs | 23 +++++-------- network/tests/dht.rs | 14 +++++--- util/Cargo.toml | 2 ++ {network/src/util => util/src}/futures.rs | 0 util/src/lib.rs | 1 + 25 files changed, 196 insertions(+), 157 deletions(-) rename network/src/{ => network}/config.rs (96%) rename network/src/{ => network}/connection.rs (79%) rename network/src/{ => network}/crypto.rs (96%) rename network/src/{ => network}/endpoint.rs (96%) rename {network/src/util => util/src}/futures.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index e8de21899..98ce12e3c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1861,7 +1861,9 @@ name = "tycho-util" version = "0.0.1" dependencies = [ "ahash", + "castaway", "dashmap", + "futures-util", "rand", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 7a0354e0b..54ca19ac8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,11 @@ members = ["consensus", "core", "network", "storage", "util", "validator"] [profile.release] debug = true +[patch.crates-io] +# NOTE: use crates.io dependency when it is released +# https://github.com/sagebind/castaway/issues/18 +castaway = { git = "https://github.com/sagebind/castaway.git" } + [workspace.lints.rust] future_incompatible = "warn" nonstandard_style = "warn" diff --git a/consensus/src/intercom/dispatcher.rs b/consensus/src/intercom/dispatcher.rs index 4c34ea84e..cfb39c5a7 100644 --- a/consensus/src/intercom/dispatcher.rs +++ b/consensus/src/intercom/dispatcher.rs @@ -6,7 +6,7 @@ use bytes::Bytes; use serde::{Deserialize, Serialize}; use tycho_network::{ - service_query_fn, Config, InboundServiceRequest, Network, NetworkExt, Response, Version, + service_query_fn, Network, NetworkConfig, NetworkExt, Response, ServiceRequest, Version, }; use crate::intercom::responses::*; @@ -58,7 +58,7 @@ impl Dispatcher { }); let network = Network::builder() - .with_config(Config::default()) + .with_config(NetworkConfig::default()) .with_random_private_key() .with_service_name("tycho-mempool-router") .build((Ipv4Addr::LOCALHOST, 0), service_fn)?; @@ -156,7 +156,7 @@ struct DispatcherInner { } impl DispatcherInner { - async fn handle(self: Arc, req: InboundServiceRequest) -> Option> { + async fn handle(self: Arc, req: ServiceRequest) -> Option { let body = match bincode::deserialize::(&req.body) { Ok(body) => body, Err(e) => { diff --git a/network/Cargo.toml b/network/Cargo.toml index 6ad3b31a9..a2f6566c1 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -10,6 +10,7 @@ include = ["src/**/*.rs", "src/**/*.tl"] ahash = "0.8" anyhow = "1.0" bytes = "1.0" +castaway = "0.2" dashmap = "5.4" ed25519 = { version = "2.0", features = ["alloc", "pkcs8"] } everscale-crypto = { version = "0.2", features = ["tl-proto"] } @@ -33,10 +34,6 @@ tokio-util = { version = "0.7", features = ["codec"] } tracing = "0.1" x509-parser = "0.15" -# NOTE: use crates.io dependency when it is released -# https://github.com/sagebind/castaway/issues/18 -castaway = { git = "https://github.com/sagebind/castaway.git" } - # local deps tycho-util = { path = "../util", version = "=0.0.1" } diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index ec47c7aa7..b621e51b5 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -12,9 +12,9 @@ use self::storage::{Storage, 
StorageError}; use crate::network::{Network, WeakNetwork}; use crate::proto::dht; use crate::types::{ - AddressList, InboundServiceRequest, PeerAffinity, PeerId, PeerInfo, Request, Response, Service, + AddressList, PeerAffinity, PeerId, PeerInfo, Request, Response, Service, ServiceRequest, }; -use crate::util::{validate_signature, NetworkExt, Routable}; +use crate::util::{check_peer_signature, NetworkExt, Routable}; pub use self::storage::StorageBuilder; @@ -156,13 +156,13 @@ impl DhtService { } } -impl Service> for DhtService { - type QueryResponse = Response; +impl Service for DhtService { + type QueryResponse = Response; type OnQueryFuture = futures_util::future::Ready>; type OnMessageFuture = futures_util::future::Ready<()>; type OnDatagramFuture = futures_util::future::Ready<()>; - fn on_query(&self, req: InboundServiceRequest) -> Self::OnQueryFuture { + fn on_query(&self, req: ServiceRequest) -> Self::OnQueryFuture { tracing::debug!( peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address, @@ -198,7 +198,7 @@ impl Service> for DhtService { } #[inline] - fn on_message(&self, req: InboundServiceRequest) -> Self::OnMessageFuture { + fn on_message(&self, req: ServiceRequest) -> Self::OnMessageFuture { tracing::debug!( peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address, @@ -229,7 +229,7 @@ impl Service> for DhtService { } #[inline] - fn on_datagram(&self, _req: InboundServiceRequest) -> Self::OnDatagramFuture { + fn on_datagram(&self, _req: ServiceRequest) -> Self::OnDatagramFuture { futures_util::future::ready(()) } } @@ -472,7 +472,7 @@ fn validate_node_info(now: u32, info: &dht::NodeInfo) -> bool { && info.address_list.created_at <= now + CLOCK_THRESHOLD && info.address_list.expires_at >= now && !info.address_list.items.is_empty() - && validate_signature(&info.id, &info.signature, info) + && check_peer_signature(&info.id, &info.signature, info) } fn validate_value(now: u32, key: &[u8; 32], value: &dht::Value) -> bool { @@ -480,7 +480,7 @@ fn validate_value(now: u32, key: &[u8; 32], value: &dht::Value) -> bool { dht::Value::Signed(value) => { value.expires_at >= now && key == &tl_proto::hash(&value.key) - && validate_signature(&value.key.peer_id, &value.signature, value) + && check_peer_signature(&value.key.peer_id, &value.signature, value) } dht::Value::Overlay(value) => value.expires_at >= now && key == &tl_proto::hash(&value.key), } diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index d380b05dc..75782dc40 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -22,10 +22,12 @@ impl RoutingTable { &self.local_id } + #[allow(unused)] pub fn is_empty(&self) -> bool { self.buckets.values().all(Bucket::is_empty) } + #[allow(unused)] pub fn len(&self) -> usize { self.buckets.values().map(|bucket| bucket.nodes.len()).sum() } @@ -42,6 +44,7 @@ impl RoutingTable { .insert(node, max_k, node_ttl) } + #[allow(unused)] pub fn remove(&mut self, key: &PeerId) -> bool { let distance = distance(&self.local_id, key); if let Some(bucket) = self.buckets.get_mut(&distance) { @@ -105,6 +108,7 @@ impl RoutingTable { } } + #[allow(unused)] pub fn contains(&self, key: &PeerId) -> bool { let distance = distance(&self.local_id, key); self.buckets diff --git a/network/src/lib.rs b/network/src/lib.rs index 8cc88c882..ccad6783e 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,19 +1,19 @@ -pub use self::util::{NetworkExt, Routable, Router, RouterBuilder}; -pub use config::{Config, QuicConfig}; +pub use 
self::util::{check_peer_signature, NetworkExt, Routable, Router, RouterBuilder}; pub use dht::{DhtClient, DhtClientBuilder, DhtService, DhtServiceBuilder, StorageBuilder}; -pub use network::{Network, NetworkBuilder, Peer, WeakNetwork}; +pub use network::{ + ActivePeers, Connection, KnownPeers, Network, NetworkBuilder, NetworkConfig, Peer, QuicConfig, + RecvStream, SendStream, WeakActivePeers, WeakNetwork, +}; pub use types::{ service_datagram_fn, service_message_fn, service_query_fn, Address, AddressList, - BoxCloneService, BoxService, Direction, DisconnectReason, InboundRequestMeta, - InboundServiceRequest, PeerId, Request, Response, RpcQuery, Service, ServiceDatagramFn, - ServiceExt, ServiceMessageFn, ServiceQueryFn, Version, + BoxCloneService, BoxService, Direction, DisconnectReason, InboundRequestMeta, PeerAffinity, + PeerEvent, PeerId, PeerInfo, Request, Response, RpcQuery, Service, ServiceDatagramFn, + ServiceExt, ServiceMessageFn, ServiceQueryFn, ServiceRequest, Version, }; -mod config; -mod connection; -mod crypto; +pub use quinn; + mod dht; -mod endpoint; mod network; mod types; mod util; diff --git a/network/src/config.rs b/network/src/network/config.rs similarity index 96% rename from network/src/config.rs rename to network/src/network/config.rs index 92c608788..5c4da552a 100644 --- a/network/src/config.rs +++ b/network/src/network/config.rs @@ -3,12 +3,14 @@ use std::time::Duration; use anyhow::{Context, Result}; -use crate::crypto::{CertVerifier, CertVerifierWithPeerId}; +use crate::network::crypto::{ + generate_cert, peer_id_from_certificate, CertVerifier, CertVerifierWithPeerId, +}; use crate::types::PeerId; #[derive(Debug, Clone)] #[non_exhaustive] -pub struct Config { +pub struct NetworkConfig { pub quic: Option, pub connection_manager_channel_capacity: usize, pub connectivity_check_interval: Duration, @@ -22,7 +24,7 @@ pub struct Config { pub shutdown_idle_timeout: Duration, } -impl Default for Config { +impl Default for NetworkConfig { fn default() -> Self { Self { quic: None, @@ -178,8 +180,8 @@ impl EndpointConfigBuilder { let reset_key = compute_reset_key(&keypair.secret_key); let quinn_endpoint_config = quinn::EndpointConfig::new(Arc::new(reset_key)); - let (cert, pkcs8_der) = crate::crypto::generate_cert(&keypair, &service_name) - .context("Failed to generate a certificate")?; + let (cert, pkcs8_der) = + generate_cert(&keypair, &service_name).context("Failed to generate a certificate")?; let cert_verifier = Arc::new(CertVerifier::from(service_name.clone())); let quinn_client_config = make_client_config( @@ -197,7 +199,7 @@ impl EndpointConfigBuilder { transport_config.clone(), )?; - let peer_id = crate::crypto::peer_id_from_certificate(&cert)?; + let peer_id = peer_id_from_certificate(&cert)?; Ok(EndpointConfig { peer_id, diff --git a/network/src/connection.rs b/network/src/network/connection.rs similarity index 79% rename from network/src/connection.rs rename to network/src/network/connection.rs index a40b1379c..d5dd1e34b 100644 --- a/network/src/connection.rs +++ b/network/src/network/connection.rs @@ -5,8 +5,9 @@ use std::task::{Context, Poll}; use anyhow::{Context as _, Result}; use bytes::Bytes; -use quinn::{ConnectionError, RecvStream, SendDatagramError}; +use quinn::{ConnectionError, SendDatagramError}; +use crate::network::crypto::peer_id_from_certificate; use crate::types::{Direction, InboundRequestMeta, PeerId}; #[derive(Clone)] @@ -56,14 +57,14 @@ impl Connection { self.inner .open_bi() .await - .map(|(send, recv)| (SendStream(send), recv)) + .map(|(send, 
recv)| (SendStream(send), RecvStream(recv))) } pub async fn accept_bi(&self) -> Result<(SendStream, RecvStream), ConnectionError> { self.inner .accept_bi() .await - .map(|(send, recv)| (SendStream(send), recv)) + .map(|(send, recv)| (SendStream(send), RecvStream(recv))) } pub async fn open_uni(&self) -> Result { @@ -71,7 +72,7 @@ impl Connection { } pub async fn accept_uni(&self) -> Result { - self.inner.accept_uni().await + self.inner.accept_uni().await.map(RecvStream) } pub fn send_datagram(&self, data: Bytes) -> Result<(), SendDatagramError> { @@ -94,6 +95,7 @@ impl std::fmt::Debug for Connection { } } +#[repr(transparent)] pub struct SendStream(quinn::SendStream); impl Drop for SendStream { @@ -145,6 +147,36 @@ impl tokio::io::AsyncWrite for SendStream { } } +#[repr(transparent)] +pub struct RecvStream(quinn::RecvStream); + +impl std::ops::Deref for RecvStream { + type Target = quinn::RecvStream; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for RecvStream { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl tokio::io::AsyncRead for RecvStream { + #[inline] + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + Pin::new(&mut self.0).poll_read(cx, buf) + } +} + fn extract_peer_id(connection: &quinn::Connection) -> Result { let certificate = connection .peer_identity() @@ -152,5 +184,5 @@ fn extract_peer_id(connection: &quinn::Connection) -> Result { .and_then(|certificates| certificates.into_iter().next()) .context("No certificate found in the connection")?; - crate::crypto::peer_id_from_certificate(&certificate).map_err(Into::into) + peer_id_from_certificate(&certificate).map_err(Into::into) } diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index f314e76d2..e18dd1808 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -4,29 +4,28 @@ use std::time::{Duration, Instant}; use ahash::HashMap; use anyhow::Result; -use bytes::Bytes; use tokio::sync::{broadcast, mpsc, oneshot}; use tokio::task::JoinSet; use tycho_util::{FastDashMap, FastHashMap}; -use super::request_handler::InboundRequestHandler; -use super::wire::handshake; -use crate::config::Config; -use crate::connection::Connection; -use crate::endpoint::{Connecting, Endpoint}; +use crate::network::config::NetworkConfig; +use crate::network::connection::Connection; +use crate::network::endpoint::{Connecting, Endpoint}; +use crate::network::request_handler::InboundRequestHandler; +use crate::network::wire::handshake; use crate::types::{ - Address, BoxCloneService, Direction, DisconnectReason, InboundServiceRequest, PeerAffinity, - PeerEvent, PeerId, PeerInfo, Response, + Address, BoxCloneService, Direction, DisconnectReason, PeerAffinity, PeerEvent, PeerId, + PeerInfo, Response, ServiceRequest, }; #[derive(Debug)] -pub enum ConnectionManagerRequest { +pub(crate) enum ConnectionManagerRequest { Connect(Address, Option, oneshot::Sender>), Shutdown(oneshot::Sender<()>), } -pub struct ConnectionManager { - config: Arc, +pub(crate) struct ConnectionManager { + config: Arc, endpoint: Arc, mailbox: mpsc::Receiver, @@ -40,7 +39,7 @@ pub struct ConnectionManager { active_peers: ActivePeers, known_peers: KnownPeers, - service: BoxCloneService, Response>, + service: BoxCloneService, } impl Drop for ConnectionManager { @@ -51,11 +50,11 @@ impl Drop for ConnectionManager { impl ConnectionManager { 
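// A small sketch, not taken from the diff, of why the new `SendStream`/`RecvStream` wrappers
// implement `AsyncWrite`/`AsyncRead`: they plug directly into generic tokio I/O helpers.
// `relay_uni` is a hypothetical helper; only the `Connection` methods come from this patch.
async fn relay_uni(from: &Connection, to: &Connection) -> anyhow::Result<u64> {
    let mut reader = from.accept_uni().await?; // RecvStream: implements AsyncRead
    let mut writer = to.open_uni().await?; // SendStream: implements AsyncWrite
    // Copy the unidirectional stream byte-for-byte until the sender finishes it.
    let copied = tokio::io::copy(&mut reader, &mut writer).await?;
    Ok(copied)
}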
pub fn new( - config: Arc, + config: Arc, endpoint: Arc, active_peers: ActivePeers, known_peers: KnownPeers, - service: BoxCloneService, Response>, + service: BoxCloneService, ) -> (Self, mpsc::Sender) { let (mailbox_tx, mailbox) = mpsc::channel(config.connection_manager_channel_capacity); let connection_manager = Self { @@ -214,7 +213,7 @@ impl ConnectionManager { fn handle_incoming(&mut self, connecting: Connecting) { async fn handle_incoming_task( connecting: Connecting, - config: Arc, + config: Arc, active_peers: ActivePeers, known_peers: KnownPeers, ) -> ConnectingOutput { @@ -319,7 +318,7 @@ impl ConnectionManager { address: Address, peer_id: Option, callback: oneshot::Sender>, - config: Arc, + config: Arc, ) -> ConnectingOutput { let fut = async { let connection = connecting?.await?; diff --git a/network/src/crypto.rs b/network/src/network/crypto.rs similarity index 96% rename from network/src/crypto.rs rename to network/src/network/crypto.rs index ea31db560..602d1d62b 100644 --- a/network/src/crypto.rs +++ b/network/src/network/crypto.rs @@ -5,7 +5,7 @@ use pkcs8::EncodePrivateKey; use crate::types::PeerId; -pub fn generate_cert( +pub(crate) fn generate_cert( keypair: &ed25519::KeypairBytes, subject_name: &str, ) -> Result<(rustls::Certificate, rustls::PrivateKey)> { @@ -25,7 +25,7 @@ pub fn generate_cert( Ok((rustls::Certificate(cert), key_der)) } -pub fn peer_id_from_certificate( +pub(crate) fn peer_id_from_certificate( certificate: &rustls::Certificate, ) -> Result { use pkcs8::DecodePublicKey; @@ -44,7 +44,7 @@ pub fn peer_id_from_certificate( Ok(PeerId(public_key.to_bytes())) } -pub struct CertVerifierWithPeerId { +pub(crate) struct CertVerifierWithPeerId { inner: CertVerifier, peer_id: PeerId, } @@ -87,7 +87,7 @@ impl rustls::client::ServerCertVerifier for CertVerifierWithPeerId { } /// Verifies self-signed certificates for the specified SNI. -pub struct CertVerifier { +pub(crate) struct CertVerifier { service_name: String, } @@ -125,7 +125,7 @@ impl rustls::server::ClientCertVerifier for CertVerifier { prepared .parsed .verify_for_usage( - SIGNATURE_ALGHORITHMS, + SIGNATURE_ALGORITHMS, std::slice::from_ref(&prepared.root), &prepared.intermediates, now, @@ -181,7 +181,7 @@ impl rustls::client::ServerCertVerifier for CertVerifier { prepared .parsed .verify_for_usage( - SIGNATURE_ALGHORITHMS, + SIGNATURE_ALGORITHMS, std::slice::from_ref(&prepared.root), &prepared.intermediates, now, @@ -257,4 +257,4 @@ struct InvalidCertificatePublicKey(pkcs8::spki::Error); #[error("certificate peer id mismatch")] struct CertificatePeerIdMismatch; -static SIGNATURE_ALGHORITHMS: &[&webpki::SignatureAlgorithm] = &[&webpki::ED25519]; +static SIGNATURE_ALGORITHMS: &[&webpki::SignatureAlgorithm] = &[&webpki::ED25519]; diff --git a/network/src/endpoint.rs b/network/src/network/endpoint.rs similarity index 96% rename from network/src/endpoint.rs rename to network/src/network/endpoint.rs index 925ff8062..64ef1f4cc 100644 --- a/network/src/endpoint.rs +++ b/network/src/network/endpoint.rs @@ -7,11 +7,11 @@ use std::time::Duration; use anyhow::Result; -use crate::config::EndpointConfig; -use crate::connection::Connection; +use crate::network::config::EndpointConfig; +use crate::network::connection::Connection; use crate::types::{Address, Direction, PeerId}; -pub struct Endpoint { +pub(crate) struct Endpoint { inner: quinn::Endpoint, local_addr: RwLock, config: EndpointConfig, @@ -114,7 +114,7 @@ impl Endpoint { pin_project_lite::pin_project! 
{ #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Accept<'a> { + pub(crate) struct Accept<'a> { #[pin] inner: quinn::Accept<'a>, } @@ -133,7 +133,7 @@ impl<'a> Future for Accept<'a> { #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Connecting { +pub(crate) struct Connecting { inner: quinn::Connecting, origin: Direction, } diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index 146715cac..1d2175b62 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -2,25 +2,27 @@ use std::net::{SocketAddr, ToSocketAddrs}; use std::sync::{Arc, Weak}; use anyhow::Result; -use bytes::Bytes; use everscale_crypto::ed25519; use rand::Rng; use tokio::sync::{broadcast, mpsc, oneshot}; -use crate::config::{Config, EndpointConfig}; -use crate::endpoint::Endpoint; +use self::config::EndpointConfig; +use self::connection_manager::{ConnectionManager, ConnectionManagerRequest}; +use self::endpoint::Endpoint; use crate::types::{ - Address, DisconnectReason, InboundServiceRequest, PeerEvent, PeerId, Response, Service, - ServiceExt, + Address, DisconnectReason, PeerEvent, PeerId, Response, Service, ServiceExt, ServiceRequest, }; +pub use self::config::{NetworkConfig, QuicConfig}; +pub use self::connection::{Connection, RecvStream, SendStream}; +pub use self::connection_manager::{ActivePeers, KnownPeers, WeakActivePeers}; pub use self::peer::Peer; -use self::connection_manager::{ - ActivePeers, ConnectionManager, ConnectionManagerRequest, KnownPeers, WeakActivePeers, -}; - +mod config; +mod connection; mod connection_manager; +mod crypto; +mod endpoint; mod peer; mod request_handler; mod wire; @@ -32,11 +34,11 @@ pub struct NetworkBuilder { #[derive(Default)] struct BuilderFields { - config: Option, + config: Option, } impl NetworkBuilder { - pub fn with_config(mut self, config: Config) -> Self { + pub fn with_config(mut self, config: NetworkConfig) -> Self { self.optional_fields.config = Some(config); self } @@ -70,7 +72,7 @@ impl NetworkBuilder { pub fn build(self, bind_address: T, service: S) -> Result where S: Send + Sync + Clone + 'static, - S: Service, QueryResponse = Response>, + S: Service, { use socket2::{Domain, Protocol, Socket, Type}; @@ -236,7 +238,7 @@ impl Network { } struct NetworkInner { - config: Arc, + config: Arc, endpoint: Arc, active_peers: WeakActivePeers, known_peers: KnownPeers, @@ -309,8 +311,8 @@ mod tests { use super::*; use crate::types::{service_query_fn, BoxCloneService}; - fn echo_service() -> BoxCloneService, Response> { - let handle = |request: InboundServiceRequest| async move { + fn echo_service() -> BoxCloneService { + let handle = |request: ServiceRequest| async move { tracing::trace!("received: {}", request.body.escape_ascii()); let response = Response { version: Default::default(), diff --git a/network/src/network/peer.rs b/network/src/network/peer.rs index 1718fa37a..37a26d754 100644 --- a/network/src/network/peer.rs +++ b/network/src/network/peer.rs @@ -4,19 +4,19 @@ use anyhow::Result; use bytes::Bytes; use tokio_util::codec::{FramedRead, FramedWrite}; -use super::wire::{make_codec, recv_response, send_request}; -use crate::config::Config; -use crate::connection::Connection; +use crate::network::config::NetworkConfig; +use crate::network::connection::Connection; +use crate::network::wire::{make_codec, recv_response, send_request}; use crate::types::{PeerId, Request, Response}; #[derive(Clone)] pub struct Peer { connection: Connection, - config: Arc, + config: 
Arc, } impl Peer { - pub fn new(connection: Connection, config: Arc) -> Self { + pub(crate) fn new(connection: Connection, config: Arc) -> Self { Self { connection, config } } @@ -24,7 +24,7 @@ impl Peer { self.connection.peer_id() } - pub async fn rpc(&self, request: Request) -> Result> { + pub async fn rpc(&self, request: Request) -> Result { let (send_stream, recv_stream) = self.connection.open_bi().await?; let mut send_stream = FramedWrite::new(send_stream, make_codec(&self.config)); let mut recv_stream = FramedRead::new(recv_stream, make_codec(&self.config)); @@ -35,7 +35,7 @@ impl Peer { recv_response(&mut recv_stream).await } - pub async fn send_message(&self, request: Request) -> Result<()> { + pub async fn send_message(&self, request: Request) -> Result<()> { let send_stream = self.connection.open_uni().await?; let mut send_stream = FramedWrite::new(send_stream, make_codec(&self.config)); diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs index 060c5c06f..88deb1a34 100644 --- a/network/src/network/request_handler.rs +++ b/network/src/network/request_handler.rs @@ -1,31 +1,29 @@ use std::sync::Arc; use anyhow::Result; -use bytes::Bytes; -use quinn::RecvStream; use tokio::task::JoinSet; use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec}; -use super::connection_manager::ActivePeers; -use super::wire::{make_codec, recv_request, send_response}; -use crate::config::Config; -use crate::connection::{Connection, SendStream}; +use crate::network::config::NetworkConfig; +use crate::network::connection::{Connection, RecvStream, SendStream}; +use crate::network::connection_manager::ActivePeers; +use crate::network::wire::{make_codec, recv_request, send_response}; use crate::types::{ - BoxCloneService, DisconnectReason, InboundRequestMeta, InboundServiceRequest, Response, Service, + BoxCloneService, DisconnectReason, InboundRequestMeta, Response, Service, ServiceRequest, }; -pub struct InboundRequestHandler { - config: Arc, +pub(crate) struct InboundRequestHandler { + config: Arc, connection: Connection, - service: BoxCloneService, Response>, + service: BoxCloneService, active_peers: ActivePeers, } impl InboundRequestHandler { pub fn new( - config: Arc, + config: Arc, connection: Connection, - service: BoxCloneService, Response>, + service: BoxCloneService, active_peers: ActivePeers, ) -> Self { Self { @@ -85,7 +83,7 @@ impl InboundRequestHandler { let service = self.service.clone(); async move { service - .on_datagram(InboundServiceRequest { + .on_datagram(ServiceRequest { metadata, body: datagram, }) @@ -123,15 +121,15 @@ impl InboundRequestHandler { struct UniStreamRequestHandler { meta: Arc, - service: BoxCloneService, Response>, + service: BoxCloneService, recv_stream: FramedRead, } impl UniStreamRequestHandler { fn new( - config: &Config, + config: &NetworkConfig, meta: Arc, - service: BoxCloneService, Response>, + service: BoxCloneService, recv_stream: RecvStream, ) -> Self { Self { @@ -150,7 +148,7 @@ impl UniStreamRequestHandler { async fn do_handle(mut self) -> Result<()> { let req = recv_request(&mut self.recv_stream).await?; self.service - .on_query(InboundServiceRequest { + .on_query(ServiceRequest { metadata: self.meta, body: req.body, }) @@ -161,16 +159,16 @@ impl UniStreamRequestHandler { struct BiStreamRequestHandler { meta: Arc, - service: BoxCloneService, Response>, + service: BoxCloneService, send_stream: FramedWrite, recv_stream: FramedRead, } impl BiStreamRequestHandler { fn new( - config: &Config, + config: 
&NetworkConfig, meta: Arc, - service: BoxCloneService, Response>, + service: BoxCloneService, send_stream: SendStream, recv_stream: RecvStream, ) -> Self { @@ -190,7 +188,7 @@ impl BiStreamRequestHandler { async fn do_handle(mut self) -> Result<()> { let req = recv_request(&mut self.recv_stream).await?; - let handler = self.service.on_query(InboundServiceRequest { + let handler = self.service.on_query(ServiceRequest { metadata: self.meta, body: req.body, }); diff --git a/network/src/network/wire.rs b/network/src/network/wire.rs index 6861d413a..3b08eb789 100644 --- a/network/src/network/wire.rs +++ b/network/src/network/wire.rs @@ -1,14 +1,14 @@ use anyhow::Result; -use bytes::Bytes; use futures_util::sink::SinkExt; use futures_util::StreamExt; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec}; -use crate::config::Config; +use crate::network::config::NetworkConfig; +use crate::network::connection::Connection; use crate::types::{Direction, Request, Response, Version}; -pub fn make_codec(config: &Config) -> LengthDelimitedCodec { +pub(crate) fn make_codec(config: &NetworkConfig) -> LengthDelimitedCodec { let mut builder = LengthDelimitedCodec::builder(); if let Some(max_frame_size) = config.max_frame_size { @@ -18,9 +18,7 @@ pub fn make_codec(config: &Config) -> LengthDelimitedCodec { builder.length_field_length(4).big_endian().new_codec() } -pub async fn handshake( - connection: crate::connection::Connection, -) -> Result { +pub(crate) async fn handshake(connection: Connection) -> Result { match connection.origin() { Direction::Inbound => { let mut send_stream = connection.open_uni().await?; @@ -35,18 +33,18 @@ pub async fn handshake( Ok(connection) } -pub async fn send_request( +pub(crate) async fn send_request( send_stream: &mut FramedWrite, - request: Request, + request: Request, ) -> Result<()> { send_version(send_stream.get_mut(), request.version).await?; send_stream.send(request.body).await?; Ok(()) } -pub async fn recv_request( +pub(crate) async fn recv_request( recv_stream: &mut FramedRead, -) -> Result> { +) -> Result { let version = recv_version(recv_stream.get_mut()).await?; let body = match recv_stream.next().await { Some(body) => body?.freeze(), @@ -55,18 +53,18 @@ pub async fn recv_request( Ok(Request { version, body }) } -pub async fn send_response( +pub(crate) async fn send_response( send_stream: &mut FramedWrite, - response: Response, + response: Response, ) -> Result<()> { send_version(send_stream.get_mut(), response.version).await?; send_stream.send(response.body).await?; Ok(()) } -pub async fn recv_response( +pub(crate) async fn recv_response( recv_stream: &mut FramedRead, -) -> Result> { +) -> Result { let version = recv_version(recv_stream.get_mut()).await?; let body = match recv_stream.next().await { Some(body) => body?.freeze(), diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index 4896ceff7..1447e6401 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -200,8 +200,6 @@ pub mod rpc { use super::*; /// Suggest a node to store that value. - /// - /// See [`Stored`]. 
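// A self-contained sketch (not part of the diff) of what the `make_codec` framing above puts on
// the wire: every request/response body is prefixed with a 4-byte big-endian length. The test
// name and the in-memory duplex pipe are illustrative; tokio, tokio-util and futures-util are
// already dependencies of this crate.
#[tokio::test]
async fn length_prefixed_framing_round_trip() -> anyhow::Result<()> {
    use bytes::Bytes;
    use futures_util::{SinkExt, StreamExt};
    use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec};

    let make_codec = || {
        LengthDelimitedCodec::builder()
            .length_field_length(4)
            .big_endian()
            .new_codec()
    };

    // An in-memory pipe stands in for the QUIC stream pair.
    let (client, server) = tokio::io::duplex(64 * 1024);
    let mut writer = FramedWrite::new(client, make_codec());
    let mut reader = FramedRead::new(server, make_codec());

    writer.send(Bytes::from_static(b"hello dht")).await?;

    let frame = reader.next().await.expect("expected one frame")?;
    assert_eq!(&frame[..], &b"hello dht"[..]);
    Ok(())
}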
#[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = "dht.store", scheme = "proto.tl")] pub struct Store { diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index cbc17eb09..f004aabb0 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -40,12 +40,12 @@ impl Version { } } -pub struct Request { +pub struct Request { pub version: Version, - pub body: T, + pub body: Bytes, } -impl Request { +impl Request { pub fn from_tl(body: T) -> Self where T: tl_proto::TlWrite, @@ -57,19 +57,19 @@ impl Request { } } -impl> AsRef<[u8]> for Request { +impl AsRef<[u8]> for Request { #[inline] fn as_ref(&self) -> &[u8] { self.body.as_ref() } } -pub struct Response { +pub struct Response { pub version: Version, - pub body: T, + pub body: Bytes, } -impl Response { +impl Response { pub fn from_tl(body: T) -> Self where T: tl_proto::TlWrite, @@ -88,19 +88,19 @@ impl Response { } } -impl> AsRef<[u8]> for Response { +impl AsRef<[u8]> for Response { #[inline] fn as_ref(&self) -> &[u8] { self.body.as_ref() } } -pub struct InboundServiceRequest { +pub struct ServiceRequest { pub metadata: Arc, - pub body: T, + pub body: Bytes, } -impl> AsRef<[u8]> for InboundServiceRequest { +impl AsRef<[u8]> for ServiceRequest { #[inline] fn as_ref(&self) -> &[u8] { self.body.as_ref() diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs index 63f1ad51e..a57c6ab68 100644 --- a/network/src/util/mod.rs +++ b/network/src/util/mod.rs @@ -1,12 +1,10 @@ use bytes::Bytes; -pub use self::futures::BoxFutureOrNoop; pub use self::router::{Routable, Router, RouterBuilder}; pub use self::traits::NetworkExt; use crate::types::PeerId; -mod futures; mod router; mod traits; @@ -34,7 +32,7 @@ macro_rules! match_tl_request { }; } -pub(crate) fn validate_signature(peed_id: &PeerId, signature: &Bytes, data: &T) -> bool +pub fn check_peer_signature(peed_id: &PeerId, signature: &Bytes, data: &T) -> bool where T: tl_proto::TlWrite, { diff --git a/network/src/util/router.rs b/network/src/util/router.rs index 25e1d6722..7a445643b 100644 --- a/network/src/util/router.rs +++ b/network/src/util/router.rs @@ -1,10 +1,10 @@ use std::marker::PhantomData; use std::sync::Arc; +use tycho_util::futures::BoxFutureOrNoop; use tycho_util::FastHashMap; use crate::types::{BoxService, Service, ServiceExt}; -use crate::util::BoxFutureOrNoop; pub trait Routable { #[inline] diff --git a/network/src/util/traits.rs b/network/src/util/traits.rs index f031eae31..a4d1d16f7 100644 --- a/network/src/util/traits.rs +++ b/network/src/util/traits.rs @@ -1,7 +1,6 @@ use std::future::Future; use anyhow::Result; -use bytes::Bytes; use crate::network::{Network, Peer}; use crate::types::{PeerEvent, PeerId, Request, Response}; @@ -10,22 +9,18 @@ pub trait NetworkExt { fn query( &self, peer_id: &PeerId, - request: Request, - ) -> impl Future>> + Send; + request: Request, + ) -> impl Future> + Send; - fn send( - &self, - peer_id: &PeerId, - request: Request, - ) -> impl Future> + Send; + fn send(&self, peer_id: &PeerId, request: Request) -> impl Future> + Send; } impl NetworkExt for Network { - async fn query(&self, peer_id: &PeerId, request: Request) -> Result> { + async fn query(&self, peer_id: &PeerId, request: Request) -> Result { on_connected_peer(self, Peer::rpc, peer_id, request).await } - async fn send(&self, peer_id: &PeerId, request: Request) -> Result<()> { + async fn send(&self, peer_id: &PeerId, request: Request) -> Result<()> { on_connected_peer(self, Peer::send_message, peer_id, request).await } } @@ -34,7 +29,7 @@ async fn 
on_connected_peer( network: &Network, f: F, peer_id: &PeerId, - request: Request, + request: Request, ) -> Result where for<'a> F: PeerTask<'a, T>, @@ -87,18 +82,18 @@ where trait PeerTask<'a, T> { type Output: Future> + 'a; - fn call(self, peer: &'a Peer, request: Request) -> Self::Output; + fn call(self, peer: &'a Peer, request: Request) -> Self::Output; } impl<'a, T, F, Fut> PeerTask<'a, T> for F where - F: FnOnce(&'a Peer, Request) -> Fut, + F: FnOnce(&'a Peer, Request) -> Fut, Fut: Future> + 'a, { type Output = Fut; #[inline] - fn call(self, peer: &'a Peer, request: Request) -> Fut { + fn call(self, peer: &'a Peer, request: Request) -> Fut { self(peer, request) } } diff --git a/network/tests/dht.rs b/network/tests/dht.rs index 3db627d5b..0290f1ed4 100644 --- a/network/tests/dht.rs +++ b/network/tests/dht.rs @@ -1,8 +1,14 @@ +//! Run tests with this env: +//! ```text +//! RUST_LOG=info,tycho_network=trace +//! ``` + use std::net::Ipv4Addr; use std::sync::Arc; use anyhow::Result; use everscale_crypto::ed25519; +use tycho_network::proto::dht; use tycho_network::{proto, Address, AddressList, DhtClient, DhtService, Network, PeerId, Router}; use tycho_util::time::now_sec; @@ -54,9 +60,9 @@ impl Node { } } -fn make_network(node_count: usize) -> Vec { +fn make_network(node_count: usize) -> (Vec, Vec>) { let keys = (0..node_count) - .map(|i| ed25519::SecretKey::generate(&mut rand::thread_rng())) + .map(|_| ed25519::SecretKey::generate(&mut rand::thread_rng())) .collect::>(); let nodes = keys @@ -74,14 +80,14 @@ fn make_network(node_count: usize) -> Vec { } } - nodes + (nodes, bootstrap_info) } #[tokio::test] async fn bootstrap_nodes_accessible() -> Result<()> { tracing_subscriber::fmt::init(); - let nodes = make_network(5); + let (nodes, _) = make_network(5); for i in 0..nodes.len() { for j in 0..nodes.len() { diff --git a/util/Cargo.toml b/util/Cargo.toml index 8e8bc5b84..59830c5ec 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -7,7 +7,9 @@ description = "Shared utilities for node components." 
[dependencies] # crates.io deps ahash = "0.8" +castaway = "0.2" dashmap = "5.4" +futures-util = "0.3" rand = "0.8" tokio = { version = "1", default-features = false, features = ["time"] } diff --git a/network/src/util/futures.rs b/util/src/futures.rs similarity index 100% rename from network/src/util/futures.rs rename to util/src/futures.rs diff --git a/util/src/lib.rs b/util/src/lib.rs index 71786a6c3..54e4d47aa 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use std::collections::HashSet; pub mod time; +pub mod futures; pub type FastDashMap = dashmap::DashMap; pub type FastDashSet = dashmap::DashSet; From 43a788743cbd97ca729c95e3607ecf6380b8d9c0 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Tue, 13 Feb 2024 19:27:00 +0100 Subject: [PATCH 23/35] network: Simplify dht builder --- network/src/dht/config.rs | 34 +++++++++ network/src/dht/mod.rs | 139 ++++++++++++++++++------------------- network/src/dht/query.rs | 7 +- network/src/dht/routing.rs | 32 +++------ network/src/dht/storage.rs | 6 +- network/src/lib.rs | 9 +-- network/src/proto/dht.rs | 26 +++++++ network/tests/dht.rs | 4 +- 8 files changed, 147 insertions(+), 110 deletions(-) create mode 100644 network/src/dht/config.rs diff --git a/network/src/dht/config.rs b/network/src/dht/config.rs new file mode 100644 index 000000000..97955a4d0 --- /dev/null +++ b/network/src/dht/config.rs @@ -0,0 +1,34 @@ +use std::time::Duration; + +// TODO: add max storage item size +#[derive(Debug, Clone)] +pub struct DhtConfig { + /// DHT K parameter. + pub max_k: usize, + /// Maximum time to live for node info. + pub max_node_info_ttl: Duration, + /// Maximum time to live for stored values. + pub max_stored_value_ttl: Duration, + /// Maximum length of stored key names. + pub max_stored_key_name_len: usize, + /// Maximum index of stored keys. + pub max_stored_key_index: u32, + /// Maximum storage capacity (number of entries). + pub max_storage_capacity: u64, + /// Time until a stored item is considered idle and can be removed. 
+ pub storage_item_time_to_idle: Option, +} + +impl Default for DhtConfig { + fn default() -> Self { + Self { + max_k: 6, + max_node_info_ttl: Duration::from_secs(3600), + max_stored_value_ttl: Duration::from_secs(3600), + max_stored_key_name_len: 128, + max_stored_key_index: 4, + max_storage_capacity: 10000, + storage_item_time_to_idle: None, + } + } +} diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index b621e51b5..33028bd54 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -8,16 +8,18 @@ use tycho_util::time::{now_sec, shifted_interval}; use self::query::{Query, StoreValue}; use self::routing::RoutingTable; -use self::storage::{Storage, StorageError}; +use self::storage::Storage; use crate::network::{Network, WeakNetwork}; use crate::proto::dht; use crate::types::{ AddressList, PeerAffinity, PeerId, PeerInfo, Request, Response, Service, ServiceRequest, }; -use crate::util::{check_peer_signature, NetworkExt, Routable}; +use crate::util::{NetworkExt, Routable}; -pub use self::storage::StorageBuilder; +pub use self::config::DhtConfig; +pub use self::storage::{OverlayValueMerger, StorageError}; +mod config; mod query; mod routing; mod storage; @@ -58,7 +60,7 @@ impl DhtClient { } pub fn add_peer(&self, peer: Arc) -> Result { - self.inner.add_peer(&self.network, peer) + self.inner.add_node_info(&self.network, peer) } pub async fn get_node_info(&self, peer_id: &PeerId) -> Result { @@ -86,24 +88,52 @@ impl DhtClient { } } -pub struct DhtServiceBuilder { - mandatory_fields: MandatoryFields, +pub struct DhtServiceBuilder { local_id: PeerId, - node_ttl: Duration, - max_k: usize, + config: Option, + overlay_merger: Option>, } +// TODO: add overlay merger methods impl DhtServiceBuilder { + pub fn with_config(mut self, config: DhtConfig) -> Self { + self.config = Some(config); + self + } + + pub fn with_overlay_value_merger(mut self, merger: Arc) -> Self { + self.overlay_merger = Some(merger); + self + } + pub fn build(self) -> (DhtClientBuilder, DhtService) { - let storage = self.mandatory_fields; + let config = self.config.unwrap_or_default(); + + let storage = { + let mut builder = Storage::builder() + .with_max_key_name_len(config.max_stored_key_name_len) + .with_max_key_index(config.max_stored_key_index) + .with_max_capacity(config.max_storage_capacity) + .with_max_ttl(config.max_stored_value_ttl); + + if let Some(time_to_idle) = config.storage_item_time_to_idle { + builder = builder.with_max_idle(time_to_idle); + } + + if let Some(ref merger) = self.overlay_merger { + builder = builder.with_overlay_value_merger(merger); + } + + builder.build() + }; let inner = Arc::new(DhtInner { local_id: self.local_id, routing_table: Mutex::new(RoutingTable::new(self.local_id)), storage, node_info: Mutex::new(None), - max_k: self.max_k, - node_ttl: self.node_ttl, + max_k: config.max_k, + node_ttl: config.max_node_info_ttl, }); let client_builder = DhtClientBuilder { @@ -115,43 +145,15 @@ impl DhtServiceBuilder { } } -impl DhtServiceBuilder<()> { - pub fn with_storage(self, f: F) -> DhtServiceBuilder - where - F: FnOnce(StorageBuilder) -> StorageBuilder, - { - let storage = f(Storage::builder()).build(); - DhtServiceBuilder { - mandatory_fields: storage, - local_id: self.local_id, - node_ttl: self.node_ttl, - max_k: self.max_k, - } - } -} - -impl DhtServiceBuilder { - pub fn with_max_k(mut self, max_k: usize) -> Self { - self.max_k = max_k; - self - } - - pub fn with_node_ttl(mut self, ttl: Duration) -> Self { - self.node_ttl = ttl; - self - } -} - #[derive(Clone)] pub struct 
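// A sketch, not part of the diff, showing how the simplified builder is wired with a non-default
// DhtConfig. The flow mirrors network/tests/dht.rs; the config values and the helper name are
// illustrative only.
use std::net::Ipv4Addr;
use std::time::Duration;

use everscale_crypto::ed25519;
use tycho_network::{DhtClient, DhtConfig, DhtService, Network, Router};

fn build_dht_node(key: &ed25519::SecretKey) -> anyhow::Result<(Network, DhtClient)> {
    let keypair = ed25519::KeyPair::from(key);

    // All other knobs (storage limits, TTLs) keep their Default values.
    let (dht_client_builder, dht_service) = DhtService::builder(keypair.public_key.into())
        .with_config(DhtConfig {
            max_k: 10,
            max_node_info_ttl: Duration::from_secs(30 * 60),
            ..Default::default()
        })
        .build();

    let router = Router::builder().route(dht_service).build();

    let network = Network::builder()
        .with_private_key(key.to_bytes())
        .with_service_name("test-service")
        .build((Ipv4Addr::LOCALHOST, 0), router)?;

    let dht_client = dht_client_builder.build(network.clone());
    Ok((network, dht_client))
}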
DhtService(Arc); impl DhtService { - pub fn builder(local_id: PeerId) -> DhtServiceBuilder<()> { + pub fn builder(local_id: PeerId) -> DhtServiceBuilder { DhtServiceBuilder { - mandatory_fields: (), local_id, - node_ttl: Duration::from_secs(15 * 60), - max_k: 20, + config: None, + overlay_merger: None, } } } @@ -406,16 +408,13 @@ impl DhtInner { Ok(()) } - fn add_peer(&self, network: &Network, peer: Arc) -> Result { - anyhow::ensure!( - validate_node_info(now_sec(), &peer), - "invalid peer node info" - ); + fn add_node_info(&self, network: &Network, node: Arc) -> Result { + anyhow::ensure!(node.is_valid(now_sec()), "invalid peer node info"); // TODO: add support for multiple addresses - let peer_info = match peer.address_list.items.first() { - Some(address) if peer.id != self.local_id => PeerInfo { - peer_id: peer.id, + let peer_info = match node.address_list.items.first() { + Some(address) if node.id != self.local_id => PeerInfo { + peer_id: node.id, affinity: PeerAffinity::Allowed, address: address.clone(), }, @@ -423,7 +422,7 @@ impl DhtInner { }; let mut routing_table = self.routing_table.lock().unwrap(); - let is_new = routing_table.add(peer, self.max_k, &self.node_ttl); + let is_new = routing_table.add(node, self.max_k, &self.node_ttl); if is_new { network.known_peers().insert(peer_info); } @@ -439,7 +438,7 @@ impl DhtInner { .routing_table .lock() .unwrap() - .closest(&req.key, req.k as usize); + .closest(&req.key, (req.k as usize).min(self.max_k)); NodeResponseRaw { nodes } } @@ -452,7 +451,7 @@ impl DhtInner { .routing_table .lock() .unwrap() - .closest(&req.key, req.k as usize); + .closest(&req.key, (req.k as usize).min(self.max_k)); ValueResponseRaw::NotFound(nodes) } @@ -467,25 +466,6 @@ impl DhtInner { } } -fn validate_node_info(now: u32, info: &dht::NodeInfo) -> bool { - info.created_at <= now + CLOCK_THRESHOLD - && info.address_list.created_at <= now + CLOCK_THRESHOLD - && info.address_list.expires_at >= now - && !info.address_list.items.is_empty() - && check_peer_signature(&info.id, &info.signature, info) -} - -fn validate_value(now: u32, key: &[u8; 32], value: &dht::Value) -> bool { - match value { - dht::Value::Signed(value) => { - value.expires_at >= now - && key == &tl_proto::hash(&value.key) - && check_peer_signature(&value.key.peer_id, &value.signature, value) - } - dht::Value::Overlay(value) => value.expires_at >= now && key == &tl_proto::hash(&value.key), - } -} - #[derive(TlWrite)] #[tl(boxed, scheme = "proto.tl")] enum ValueResponseRaw { @@ -501,4 +481,17 @@ struct NodeResponseRaw { nodes: Vec>, } -const CLOCK_THRESHOLD: u32 = 1; +pub fn xor_distance(left: &PeerId, right: &PeerId) -> usize { + for (i, (left, right)) in std::iter::zip(left.0.chunks(8), right.0.chunks(8)).enumerate() { + let left = u64::from_be_bytes(left.try_into().unwrap()); + let right = u64::from_be_bytes(right.try_into().unwrap()); + let diff = left ^ right; + if diff != 0 { + return MAX_XOR_DISTANCE - (i * 64 + diff.leading_zeros() as usize); + } + } + + 0 +} + +const MAX_XOR_DISTANCE: usize = 256; diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index 8ffcdf194..658854224 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -12,7 +12,6 @@ use tycho_util::time::now_sec; use tycho_util::{FastHashMap, FastHashSet}; use crate::dht::routing::RoutingTable; -use crate::dht::{validate_node_info, validate_value}; use crate::network::Network; use crate::proto::dht; use crate::types::{PeerId, Request}; @@ -73,7 +72,7 @@ impl Query { match res { // Return the value if 
found Some(Ok(dht::ValueResponse::Found(value))) => { - if !validate_value(now_sec(), self.local_id(), &value) { + if !value.is_valid(now_sec(), self.local_id()) { // Ignore invalid values continue; } @@ -190,7 +189,7 @@ impl Query { let mut has_new = false; for node in nodes { // Skip invalid entries - if !validate_node_info(now, &node) { + if !node.is_valid(now) { continue; } @@ -214,7 +213,7 @@ impl Query { let mut has_new = false; for node in nodes { // Skip invalid entries - if !validate_node_info(now, &node) { + if !node.is_valid(now) { continue; } diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index 75782dc40..93da6a6a1 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -2,10 +2,11 @@ use std::collections::{BTreeMap, VecDeque}; use std::sync::Arc; use std::time::{Duration, Instant}; +use crate::dht::{xor_distance, MAX_XOR_DISTANCE}; use crate::proto::dht; use crate::types::PeerId; -pub struct RoutingTable { +pub(crate) struct RoutingTable { local_id: PeerId, buckets: BTreeMap, } @@ -33,7 +34,7 @@ impl RoutingTable { } pub fn add(&mut self, node: Arc, max_k: usize, node_ttl: &Duration) -> bool { - let distance = distance(&self.local_id, &node.id); + let distance = xor_distance(&self.local_id, &node.id); if distance == 0 { return false; } @@ -46,7 +47,7 @@ impl RoutingTable { #[allow(unused)] pub fn remove(&mut self, key: &PeerId) -> bool { - let distance = distance(&self.local_id, key); + let distance = xor_distance(&self.local_id, key); if let Some(bucket) = self.buckets.get_mut(&distance) { bucket.remove(key) } else { @@ -61,10 +62,10 @@ impl RoutingTable { // TODO: fill secure and unsecure buckets in parallel let mut result = Vec::with_capacity(count); - let distance = distance(&self.local_id, PeerId::wrap(key)); + let distance = xor_distance(&self.local_id, PeerId::wrap(key)); // Search for closest nodes first - for i in (distance..=MAX_DISTANCE).chain((0..distance).rev()) { + for i in (distance..=MAX_XOR_DISTANCE).chain((0..distance).rev()) { let remaining = match count.checked_sub(result.len()) { None | Some(0) => break, Some(n) => n, @@ -88,12 +89,12 @@ impl RoutingTable { return; } - let distance = distance(&self.local_id, PeerId::wrap(key)); + let distance = xor_distance(&self.local_id, PeerId::wrap(key)); let mut processed = 0; // Search for closest nodes first - for i in (distance..=MAX_DISTANCE).chain((0..distance).rev()) { + for i in (distance..=MAX_XOR_DISTANCE).chain((0..distance).rev()) { let remaining = match count.checked_sub(processed) { None | Some(0) => break, Some(n) => n, @@ -110,7 +111,7 @@ impl RoutingTable { #[allow(unused)] pub fn contains(&self, key: &PeerId) -> bool { - let distance = distance(&self.local_id, key); + let distance = xor_distance(&self.local_id, key); self.buckets .get(&distance) .map(|bucket| bucket.contains(key)) @@ -184,21 +185,6 @@ impl Node { } } -pub fn distance(left: &PeerId, right: &PeerId) -> usize { - for (i, (left, right)) in std::iter::zip(left.0.chunks(8), right.0.chunks(8)).enumerate() { - let left = u64::from_be_bytes(left.try_into().unwrap()); - let right = u64::from_be_bytes(right.try_into().unwrap()); - let diff = left ^ right; - if diff != 0 { - return MAX_DISTANCE - (i * 64 + diff.leading_zeros() as usize); - } - } - - 0 -} - -const MAX_DISTANCE: usize = 256; - #[cfg(test)] mod tests { use std::str::FromStr; diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index e0dc3fc7f..0452260ae 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs 
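// A sketch of a unit test one could add for the metric used by the routing table above; it is
// not part of the diff. With 32-byte ids, xor_distance is 0 only for identical ids, 256 when the
// most significant bit differs, and 1 when only the least significant bit differs.
#[cfg(test)]
mod xor_distance_sketch {
    use super::*;

    #[test]
    fn distance_follows_first_differing_bit() {
        let zero = PeerId([0u8; 32]);

        let mut msb = [0u8; 32];
        msb[0] = 0b1000_0000;
        assert_eq!(xor_distance(&zero, &PeerId(msb)), 256);

        let mut lsb = [0u8; 32];
        lsb[31] = 1;
        assert_eq!(xor_distance(&zero, &PeerId(lsb)), 1);

        assert_eq!(xor_distance(&zero, &zero), 0);
    }
}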
@@ -37,7 +37,7 @@ impl OverlayValueMerger for () { } } -pub struct StorageBuilder { +pub(crate) struct StorageBuilder { cache_builder: DhtCacheBuilder, overlay_value_merger: Weak, max_ttl: Duration, @@ -59,7 +59,7 @@ impl Default for StorageBuilder { } impl StorageBuilder { - pub(crate) fn build(self) -> Storage { + pub fn build(self) -> Storage { fn weigher(_key: &StorageKeyId, value: &StoredValue) -> u32 { std::mem::size_of::() as u32 + std::mem::size_of::() as u32 @@ -111,7 +111,7 @@ impl StorageBuilder { } } -pub struct Storage { +pub(crate) struct Storage { cache: DhtCache, overlay_value_merger: Weak, max_ttl_sec: u32, diff --git a/network/src/lib.rs b/network/src/lib.rs index ccad6783e..c4f9b8467 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,5 +1,8 @@ pub use self::util::{check_peer_signature, NetworkExt, Routable, Router, RouterBuilder}; -pub use dht::{DhtClient, DhtClientBuilder, DhtService, DhtServiceBuilder, StorageBuilder}; +pub use dht::{ + xor_distance, DhtClient, DhtClientBuilder, DhtConfig, DhtService, DhtServiceBuilder, + OverlayValueMerger, StorageError, +}; pub use network::{ ActivePeers, Connection, KnownPeers, Network, NetworkBuilder, NetworkConfig, Peer, QuicConfig, RecvStream, SendStream, WeakActivePeers, WeakNetwork, @@ -37,9 +40,7 @@ mod tests { async fn init_works() { let keypair = everscale_crypto::ed25519::KeyPair::generate(&mut rand::thread_rng()); - let (dht_client, dht) = DhtService::builder(keypair.public_key.into()) - .with_storage(|builder| builder) - .build(); + let (dht_client, dht) = DhtService::builder(keypair.public_key.into()).build(); let router = Router::builder().route(dht).build(); diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index 1447e6401..d0d5a28ec 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -2,6 +2,7 @@ use bytes::Bytes; use tl_proto::{TlRead, TlWrite}; use crate::types::{AddressList, PeerId}; +use crate::util::check_peer_signature; /// A signed DHT node info. 
#[derive(Debug, Clone, TlRead, TlWrite)] @@ -17,6 +18,18 @@ pub struct NodeInfo { pub signature: Bytes, } +impl NodeInfo { + pub fn is_valid(&self, at: u32) -> bool { + const CLOCK_THRESHOLD: u32 = 1; + + self.created_at <= at + CLOCK_THRESHOLD + && self.address_list.created_at <= at + CLOCK_THRESHOLD + && self.address_list.expires_at >= at + && !self.address_list.items.is_empty() + && check_peer_signature(&self.id, &self.signature, self) + } +} + pub trait WithValue: TlWrite + for<'a> TlRead<'a, Repr = tl_proto::Boxed> { @@ -108,6 +121,19 @@ pub enum Value { } impl Value { + pub fn is_valid(&self, at: u32, key_hash: &[u8; 32]) -> bool { + match self { + Self::Signed(value) => { + value.expires_at >= at + && key_hash == &tl_proto::hash(&value.key) + && check_peer_signature(&value.key.peer_id, &value.signature, value) + } + Self::Overlay(value) => { + value.expires_at >= at && key_hash == &tl_proto::hash(&value.key) + } + } + } + pub fn key_name(&self) -> &[u8] { match self { Self::Signed(value) => value.key.name.as_ref(), diff --git a/network/tests/dht.rs b/network/tests/dht.rs index 0290f1ed4..e8b309570 100644 --- a/network/tests/dht.rs +++ b/network/tests/dht.rs @@ -21,9 +21,7 @@ impl Node { fn new(key: &ed25519::SecretKey) -> Result { let keypair = everscale_crypto::ed25519::KeyPair::from(key); - let (dht_client, dht) = DhtService::builder(keypair.public_key.into()) - .with_storage(|builder| builder) - .build(); + let (dht_client, dht) = DhtService::builder(keypair.public_key.into()).build(); let router = Router::builder().route(dht).build(); From d64a44b135684a11c46260c61adefb00955fb636 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Tue, 13 Feb 2024 20:32:05 +0100 Subject: [PATCH 24/35] network: Fix dht find/store value queries --- network/src/dht/mod.rs | 189 ++++++++++++++++--------- network/src/dht/query.rs | 7 +- network/src/lib.rs | 2 +- network/src/network/request_handler.rs | 2 +- network/src/proto/dht.rs | 10 +- network/tests/dht.rs | 48 ++++++- 6 files changed, 178 insertions(+), 80 deletions(-) diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 33028bd54..7361be7f9 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -72,20 +72,31 @@ impl DhtClient { Ok(info) } - pub async fn find_value(&self, key: &T) -> Option>> + pub async fn find_value(&self, key: &T) -> Result, FindValueError> where T: dht::WithValue, { - let res = self + match self .inner .find_value(&self.network, &tl_proto::hash(key)) - .await?; - Some(res.and_then(|value| T::parse_value(value).map_err(Into::into))) + .await + { + Some(value) => T::parse_value(value).map_err(FindValueError::InvalidData), + None => Err(FindValueError::NotFound), + } } pub async fn store_value(&self, value: Box) -> Result<()> { self.inner.store_value(&self.network, value).await } + + pub fn make_signed_value(&self, name: &str, expires_at: u32, data: T) -> Box + where + T: TlWrite + 'static, + { + self.inner + .make_signed_value(&self.network, name, expires_at, data) + } } pub struct DhtServiceBuilder { @@ -94,14 +105,13 @@ pub struct DhtServiceBuilder { overlay_merger: Option>, } -// TODO: add overlay merger methods impl DhtServiceBuilder { pub fn with_config(mut self, config: DhtConfig) -> Self { self.config = Some(config); self } - pub fn with_overlay_value_merger(mut self, merger: Arc) -> Self { + pub fn with_overlay_value_merger(mut self, merger: Arc) -> Self { self.overlay_merger = Some(merger); self } @@ -164,32 +174,33 @@ impl Service for DhtService { type OnMessageFuture = 
futures_util::future::Ready<()>; type OnDatagramFuture = futures_util::future::Ready<()>; + #[tracing::instrument( + level = "debug", + name = "on_dht_query", + skip_all, + fields(peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address) + )] fn on_query(&self, req: ServiceRequest) -> Self::OnQueryFuture { - tracing::debug!( - peer_id = %req.metadata.peer_id, - addr = %req.metadata.remote_address, - byte_len = req.body.len(), - "processing DHT query", - ); - let response = crate::match_tl_request!(req.body, { dht::rpc::FindNode as ref r => { + tracing::debug!(key = %PeerId::wrap(&r.key), k = r.k, "find_node"); + let res = self.0.handle_find_node(r); Some(tl_proto::serialize(res)) }, dht::rpc::FindValue as ref r => { + tracing::debug!(key = %PeerId::wrap(&r.key), k = r.k, "find_value"); + let res = self.0.handle_find_value(r); Some(tl_proto::serialize(res)) }, dht::rpc::GetNodeInfo as _ => { + tracing::debug!("get_node_info"); + self.0.handle_get_node_info().map(tl_proto::serialize) }, }, e => { - tracing::debug!( - peer_id = %req.metadata.peer_id, - addr = %req.metadata.remote_address, - "failed to deserialize query from: {e:?}" - ); + tracing::debug!("failed to deserialize query from: {e:?}"); None }); @@ -199,32 +210,23 @@ impl Service for DhtService { })) } - #[inline] + #[tracing::instrument( + level = "debug", + name = "on_dht_message", + skip_all, + fields(peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address) + )] fn on_message(&self, req: ServiceRequest) -> Self::OnMessageFuture { - tracing::debug!( - peer_id = %req.metadata.peer_id, - addr = %req.metadata.remote_address, - byte_len = req.body.len(), - "processing DHT message", - ); - crate::match_tl_request!(req.body, { - dht::rpc::Store as ref r => match self.0.handle_store(r) { - Ok(_) => {}, - Err(e) => { - tracing::debug!( - peer_id = %req.metadata.peer_id, - addr = %req.metadata.remote_address, - "failed to store value: {e:?}" - ); + dht::rpc::Store as ref r => { + tracing::debug!("store"); + + if let Err(e) = self.0.handle_store(r) { + tracing::debug!("failed to store value: {e:?}"); } } }, e => { - tracing::debug!( - peer_id = %req.metadata.peer_id, - addr = %req.metadata.remote_address, - "failed to deserialize message from: {e:?}" - ); + tracing::debug!("failed to deserialize message from: {e:?}"); }); futures_util::future::ready(()) @@ -333,32 +335,27 @@ impl DhtInner { *self.node_info.lock().unwrap() = Some(node_info); } - async fn announce_local_node_info(self: &Arc, network: &Network, ttl: u32) -> Result<()> { + async fn announce_local_node_info(&self, network: &Network, ttl: u32) -> Result<()> { let value = { - let now = now_sec(); - - let mut value = dht::SignedValue { - key: dht::SignedKey { - name: "addr".to_owned().into(), - idx: 0, - peer_id: self.local_id, - }, - data: Bytes::from(tl_proto::serialize(AddressList { + let created_at = now_sec(); + let expires_at = created_at + ttl; + + self.make_signed_value( + network, + "addr", + expires_at, + AddressList { items: vec![network.local_addr().into()], - created_at: now, - expires_at: now + ttl, - })), - expires_at: now_sec() + ttl, - signature: Bytes::new(), - }; - value.signature = network.sign_tl(&value).to_vec().into(); - Box::new(dht::Value::Signed(value)) + created_at, + expires_at, + }, + ) }; self.store_value(network, value).await } - async fn find_more_dht_nodes(self: &Arc, network: &Network) { + async fn find_more_dht_nodes(&self, network: &Network) { // TODO: deduplicate shared futures let query = Query::new( network.clone(), 
@@ -376,11 +373,7 @@ impl DhtInner { } } - async fn find_value( - &self, - network: &Network, - key_hash: &[u8; 32], - ) -> Option>> { + async fn find_value(&self, network: &Network, key_hash: &[u8; 32]) -> Option> { // TODO: deduplicate shared futures let query = Query::new( network.clone(), @@ -429,6 +422,34 @@ impl DhtInner { Ok(is_new) } + fn make_signed_value( + &self, + network: &Network, + name: &str, + expires_at: u32, + data: T, + ) -> Box + where + T: TlWrite + 'static, + { + let mut value = dht::SignedValue { + key: dht::SignedKey { + name: name.to_owned().into(), + idx: 0, + peer_id: self.local_id, + }, + data: match castaway::cast!(data, Bytes) { + Ok(data) => data, + Err(data) => tl_proto::serialize(data).into(), + }, + expires_at, + signature: Default::default(), + }; + value.signature = network.sign_tl(&value).to_vec().into(); + + Box::new(dht::Value::Signed(value)) + } + fn handle_store(&self, req: &dht::rpc::Store) -> Result { self.storage.insert(&req.value) } @@ -466,15 +487,41 @@ impl DhtInner { } } -#[derive(TlWrite)] -#[tl(boxed, scheme = "proto.tl")] enum ValueResponseRaw { - #[tl(id = "dht.valueFound")] Found(Bytes), - #[tl(id = "dht.valueNotFound")] NotFound(Vec>), } +impl TlWrite for ValueResponseRaw { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + 4 + match self { + Self::Found(value) => value.max_size_hint(), + Self::NotFound(nodes) => nodes.max_size_hint(), + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + const FOUND_TL_ID: u32 = tl_proto::id!("dht.valueFound", scheme = "proto.tl"); + const NOT_FOUND_TL_ID: u32 = tl_proto::id!("dht.valueNotFound", scheme = "proto.tl"); + + match self { + Self::Found(value) => { + packet.write_u32(FOUND_TL_ID); + packet.write_raw_slice(&value); + } + Self::NotFound(nodes) => { + packet.write_u32(NOT_FOUND_TL_ID); + nodes.write_to(packet); + } + } + } +} + #[derive(TlWrite)] #[tl(boxed, id = "dht.nodesFound", scheme = "proto.tl")] struct NodeResponseRaw { @@ -495,3 +542,11 @@ pub fn xor_distance(left: &PeerId, right: &PeerId) -> usize { } const MAX_XOR_DISTANCE: usize = 256; + +#[derive(Debug, thiserror::Error)] +pub enum FindValueError { + #[error("failed to deserialize value: {0}")] + InvalidData(#[from] tl_proto::TlError), + #[error("value not found")] + NotFound, +} diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index 658854224..c72c1bf77 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -46,7 +46,8 @@ impl Query { self.candidates.local_id().as_bytes() } - pub async fn find_value(mut self) -> Option>> { + #[tracing::instrument(level = "debug", skip_all)] + pub async fn find_value(mut self) -> Option> { // Prepare shared request let request_body = Bytes::from(tl_proto::serialize(dht::rpc::FindValue { key: *self.local_id(), @@ -77,7 +78,7 @@ impl Query { continue; } - return Some(Ok(value)); + return Some(value); } // Refill futures from the nodes response Some(Ok(dht::ValueResponse::NotFound(nodes))) => { @@ -117,6 +118,7 @@ impl Query { None } + #[tracing::instrument(level = "debug", skip_all)] pub async fn find_peers(mut self) -> impl Iterator> { // Prepare shared request let request_body = Bytes::from(tl_proto::serialize(dht::rpc::FindNode { @@ -328,6 +330,7 @@ impl StoreValue<()> { } impl, Option>)> + Send> StoreValue { + #[tracing::instrument(level = "debug", skip_all, name = "store_value")] pub async fn run(mut self) { while let Some((node, res)) = self.futures.next().await { match res { diff --git a/network/src/lib.rs b/network/src/lib.rs index c4f9b8467..f027fdd53 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,7 +1,7 @@ pub use self::util::{check_peer_signature, NetworkExt, Routable, Router, RouterBuilder}; pub use dht::{ xor_distance, DhtClient, DhtClientBuilder, DhtConfig, DhtService, DhtServiceBuilder, - OverlayValueMerger, StorageError, + FindValueError, OverlayValueMerger, StorageError, }; pub use network::{ ActivePeers, Connection, KnownPeers, Network, NetworkBuilder, NetworkConfig, Peer, QuicConfig, diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs index 88deb1a34..6db6cd031 100644 --- a/network/src/network/request_handler.rs +++ b/network/src/network/request_handler.rs @@ -148,7 +148,7 @@ impl UniStreamRequestHandler { async fn do_handle(mut self) -> Result<()> { let req = recv_request(&mut self.recv_stream).await?; self.service - .on_query(ServiceRequest { + .on_message(ServiceRequest { metadata: self.meta, body: req.body, }) diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index d0d5a28ec..304034e5f 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -39,7 +39,7 @@ pub trait WithValue: } /// Key for values that can only be updated by the owner. -#[derive(Debug, Clone, TlRead, TlWrite)] +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] #[tl(boxed, id = "dht.signedKey", scheme = "proto.tl")] pub struct SignedKey { /// Key name. 
@@ -62,7 +62,7 @@ impl WithValue for SignedKey { } /// Key for overlay-managed values. -#[derive(Debug, Clone, TlRead, TlWrite)] +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] #[tl(boxed, id = "dht.overlayKey", scheme = "proto.tl")] pub struct OverlayKey { /// Overlay id. @@ -85,7 +85,7 @@ impl WithValue for OverlayKey { } /// Value with a known owner. -#[derive(Debug, Clone, TlRead, TlWrite)] +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] #[tl(boxed, id = "dht.signedValue", scheme = "proto.tl")] pub struct SignedValue { /// Signed key. @@ -100,7 +100,7 @@ pub struct SignedValue { } /// Overlay-managed value. -#[derive(Debug, Clone, TlRead, TlWrite)] +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] #[tl(boxed, id = "dht.overlayValue", scheme = "proto.tl")] pub struct OverlayValue { /// Overlay key. @@ -112,7 +112,7 @@ pub struct OverlayValue { } /// Stored value. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum Value { /// Value with a known owner. Signed(SignedValue), diff --git a/network/tests/dht.rs b/network/tests/dht.rs index e8b309570..2911d180c 100644 --- a/network/tests/dht.rs +++ b/network/tests/dht.rs @@ -8,8 +8,11 @@ use std::sync::Arc; use anyhow::Result; use everscale_crypto::ed25519; +use tl_proto::{TlRead, TlWrite}; use tycho_network::proto::dht; -use tycho_network::{proto, Address, AddressList, DhtClient, DhtService, Network, PeerId, Router}; +use tycho_network::{ + Address, AddressList, DhtClient, DhtService, FindValueError, Network, PeerId, Router, +}; use tycho_util::time::now_sec; struct Node { @@ -36,14 +39,14 @@ impl Node { Ok(Self { network, dht }) } - fn make_node_info(key: &ed25519::SecretKey, address: Address) -> proto::dht::NodeInfo { + fn make_node_info(key: &ed25519::SecretKey, address: Address) -> dht::NodeInfo { const TTL: u32 = 3600; let keypair = ed25519::KeyPair::from(key); let peer_id = PeerId::from(keypair.public_key); let now = now_sec(); - let mut node_info = proto::dht::NodeInfo { + let mut node_info = dht::NodeInfo { id: peer_id, address_list: AddressList { items: vec![address], @@ -83,7 +86,7 @@ fn make_network(node_count: usize) -> (Vec, Vec>) { #[tokio::test] async fn bootstrap_nodes_accessible() -> Result<()> { - tracing_subscriber::fmt::init(); + tracing_subscriber::fmt::try_init().ok(); let (nodes, _) = make_network(5); @@ -101,3 +104,40 @@ async fn bootstrap_nodes_accessible() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn bootstrap_nodes_store_value() -> Result<()> { + tracing_subscriber::fmt::try_init().ok(); + + #[derive(Debug, Clone, PartialEq, Eq, TlWrite, TlRead)] + struct SomeValue(u32); + + let (nodes, _) = make_network(5); + + // Store value + let first = &nodes[0].dht; + let value_to_store = first.make_signed_value("test", now_sec() + 600, SomeValue(123123)); + first.store_value(value_to_store.clone()).await?; + + // Retrieve an existing value + let queried_value = first + .find_value(&dht::SignedKey { + name: "test".to_owned().into(), + idx: 0, + peer_id: *first.network().peer_id(), + }) + .await?; + assert_eq!(&dht::Value::Signed(queried_value), value_to_store.as_ref()); + + // Retrieve a non-existing value + let res = first + .find_value(&dht::SignedKey { + name: "not-existing".to_owned().into(), + idx: 1, + peer_id: *first.network().peer_id(), + }) + .await; + assert!(matches!(res, Err(FindValueError::NotFound))); + + Ok(()) +} From aed6b08c05aed75f5150992a2056f692928b81f2 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Thu, 15 Feb 2024 15:51:57 +0100 Subject: [PATCH 
25/35] feat(network): Rework dht proto --- network/src/dht/mod.rs | 323 ++++++++++-------- network/src/dht/query.rs | 62 ++-- network/src/dht/routing.rs | 30 +- network/src/dht/storage.rs | 60 ++-- network/src/lib.rs | 16 +- network/src/network/connection_manager.rs | 71 ++-- network/src/network/mod.rs | 2 +- network/src/proto.tl | 43 ++- network/src/proto/dht.rs | 388 +++++++++++++++++----- network/src/types/address.rs | 35 -- network/src/types/mod.rs | 19 +- network/src/types/peer_info.rs | 80 +++++ network/src/util/mod.rs | 9 +- network/src/util/tl.rs | 43 +++ network/src/util/traits.rs | 15 +- network/tests/dht.rs | 53 ++- util/src/lib.rs | 109 +++++- 17 files changed, 900 insertions(+), 458 deletions(-) create mode 100644 network/src/types/peer_info.rs create mode 100644 network/src/util/tl.rs diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 7361be7f9..7ad131fec 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -3,16 +3,19 @@ use std::time::Duration; use anyhow::Result; use bytes::{Buf, Bytes}; -use tl_proto::TlWrite; +use tycho_util::realloc_box_enum; use tycho_util::time::{now_sec, shifted_interval}; use self::query::{Query, StoreValue}; use self::routing::RoutingTable; use self::storage::Storage; use crate::network::{Network, WeakNetwork}; -use crate::proto::dht; +use crate::proto::dht::{ + rpc, NodeInfoResponse, NodeResponse, PeerValue, PeerValueKey, PeerValueKeyRef, PeerValueRef, + Value, ValueRef, ValueResponseRaw, +}; use crate::types::{ - AddressList, PeerAffinity, PeerId, PeerInfo, Request, Response, Service, ServiceRequest, + Address, PeerAffinity, PeerId, PeerInfo, Request, Response, Service, ServiceRequest, }; use crate::util::{NetworkExt, Routable}; @@ -59,43 +62,159 @@ impl DhtClient { &self.network } - pub fn add_peer(&self, peer: Arc) -> Result { + pub fn add_peer(&self, peer: Arc) -> Result { self.inner.add_node_info(&self.network, peer) } - pub async fn get_node_info(&self, peer_id: &PeerId) -> Result { + pub async fn get_node_info(&self, peer_id: &PeerId) -> Result { let res = self .network - .query(peer_id, Request::from_tl(dht::rpc::GetNodeInfo)) + .query(peer_id, Request::from_tl(rpc::GetNodeInfo)) .await?; - let dht::NodeInfoResponse { info } = res.parse_tl()?; + let NodeInfoResponse { info } = res.parse_tl()?; Ok(info) } - pub async fn find_value(&self, key: &T) -> Result, FindValueError> + pub fn entry<'n>(&self, name: &'n [u8]) -> DhtQueryBuilder<'_, 'n> { + DhtQueryBuilder { + inner: &self.inner, + network: &self.network, + name, + idx: 0, + } + } +} + +#[derive(Clone, Copy)] +pub struct DhtQueryBuilder<'a, 'n> { + inner: &'a DhtInner, + network: &'a Network, + name: &'n [u8], + idx: u32, +} + +impl<'a, 'n> DhtQueryBuilder<'a, 'n> { + #[inline] + pub fn with_idx(&mut self, idx: u32) -> &mut Self { + self.idx = idx; + self + } + + pub async fn find_peer_value(&self, peer_id: &PeerId) -> Result where - T: dht::WithValue, + for<'tl> T: tl_proto::TlRead<'tl>, { - match self - .inner - .find_value(&self.network, &tl_proto::hash(key)) - .await - { - Some(value) => T::parse_value(value).map_err(FindValueError::InvalidData), + let key_hash = tl_proto::hash(PeerValueKeyRef { + name: self.name, + idx: self.idx, + peer_id, + }); + + match self.inner.find_value(self.network, &key_hash).await { + Some(value) => match value.as_ref() { + Value::Peer(value) => { + tl_proto::deserialize(&value.data).map_err(FindValueError::InvalidData) + } + Value::Overlay(_) => Err(FindValueError::InvalidData( + tl_proto::TlError::UnknownConstructor, + 
)), + }, None => Err(FindValueError::NotFound), } } - pub async fn store_value(&self, value: Box) -> Result<()> { - self.inner.store_value(&self.network, value).await + pub async fn find_peer_value_raw( + &self, + peer_id: &PeerId, + ) -> Result, FindValueError> { + let key_hash = tl_proto::hash(PeerValueKeyRef { + name: self.name, + idx: self.idx, + peer_id, + }); + + match self.inner.find_value(self.network, &key_hash).await { + Some(value) => { + realloc_box_enum!(value, { + Value::Peer(value) => Box::new(value) => Ok(value), + Value::Overlay(_) => Err(FindValueError::InvalidData( + tl_proto::TlError::UnknownConstructor, + )), + }) + } + None => Err(FindValueError::NotFound), + } } - pub fn make_signed_value(&self, name: &str, expires_at: u32, data: T) -> Box + pub fn with_data(&self, data: T) -> DhtQueryWithDataBuilder<'a, 'n> where - T: TlWrite + 'static, + T: tl_proto::TlWrite, { - self.inner - .make_signed_value(&self.network, name, expires_at, data) + DhtQueryWithDataBuilder { + inner: *self, + data: tl_proto::serialize(&data), + at: None, + ttl: DhtQueryWithDataBuilder::DEFAULT_TTL, + } + } +} + +pub struct DhtQueryWithDataBuilder<'a, 'n> { + inner: DhtQueryBuilder<'a, 'n>, + data: Vec, + at: Option, + ttl: u32, +} + +impl DhtQueryWithDataBuilder<'_, '_> { + const DEFAULT_TTL: u32 = 3600; + + pub fn with_time(&mut self, at: u32) -> &mut Self { + self.at = Some(at); + self + } + + pub fn with_ttl(&mut self, ttl: u32) -> &mut Self { + self.ttl = ttl; + self + } + + pub async fn store_as_peer(&self) -> Result<()> { + let dht = self.inner.inner; + let network = self.inner.network; + + let mut value = PeerValueRef { + key: PeerValueKeyRef { + name: self.inner.name, + idx: self.inner.idx, + peer_id: &dht.local_id, + }, + data: &self.data, + expires_at: self.at.unwrap_or_else(now_sec) + self.ttl, + signature: &[0; 64], + }; + let signature = network.sign_tl(&value); + value.signature = &signature; + + dht.store_value(network, ValueRef::Peer(value)).await + } + + pub fn into_signed_peer_value(self) -> PeerValue { + let dht = self.inner.inner; + let network = self.inner.network; + + let mut value = PeerValue { + key: PeerValueKey { + name: Box::from(self.inner.name), + idx: self.inner.idx, + peer_id: dht.local_id, + }, + data: self.data.into_boxed_slice(), + expires_at: self.at.unwrap_or_else(now_sec) + self.ttl, + signature: Box::new([0; 64]), + }; + *value.signature = network.sign_tl(&value); + value } } @@ -182,19 +301,19 @@ impl Service for DhtService { )] fn on_query(&self, req: ServiceRequest) -> Self::OnQueryFuture { let response = crate::match_tl_request!(req.body, { - dht::rpc::FindNode as ref r => { + rpc::FindNode as ref r => { tracing::debug!(key = %PeerId::wrap(&r.key), k = r.k, "find_node"); let res = self.0.handle_find_node(r); Some(tl_proto::serialize(res)) }, - dht::rpc::FindValue as ref r => { + rpc::FindValue as ref r => { tracing::debug!(key = %PeerId::wrap(&r.key), k = r.k, "find_value"); let res = self.0.handle_find_value(r); Some(tl_proto::serialize(res)) }, - dht::rpc::GetNodeInfo as _ => { + rpc::GetNodeInfo as _ => { tracing::debug!("get_node_info"); self.0.handle_get_node_info().map(tl_proto::serialize) @@ -218,7 +337,7 @@ impl Service for DhtService { )] fn on_message(&self, req: ServiceRequest) -> Self::OnMessageFuture { crate::match_tl_request!(req.body, { - dht::rpc::Store as ref r => { + rpc::StoreRef<'_> as ref r => { tracing::debug!("store"); if let Err(e) = self.0.handle_store(r) { @@ -241,14 +360,14 @@ impl Service for DhtService { impl Routable for 
DhtService { fn query_ids(&self) -> impl IntoIterator { [ - dht::rpc::FindNode::TL_ID, - dht::rpc::FindValue::TL_ID, - dht::rpc::GetNodeInfo::TL_ID, + rpc::FindNode::TL_ID, + rpc::FindValue::TL_ID, + rpc::GetNodeInfo::TL_ID, ] } fn message_ids(&self) -> impl IntoIterator { - [dht::rpc::Store::TL_ID] + [rpc::Store::TL_ID] } } @@ -256,7 +375,7 @@ struct DhtInner { local_id: PeerId, routing_table: Mutex, storage: Storage, - node_info: Mutex>, + node_info: Mutex>, max_k: usize, node_ttl: Duration, } @@ -320,39 +439,26 @@ impl DhtInner { fn refresh_local_node_info(&self, network: &Network, ttl: u32) { let now = now_sec(); - let mut node_info = dht::NodeInfo { + let mut node_info = PeerInfo { id: self.local_id, - address_list: AddressList { - items: vec![network.local_addr().into()], - created_at: now, - expires_at: now + ttl, - }, + address_list: vec![network.local_addr().into()].into_boxed_slice(), created_at: now, - signature: Bytes::new(), + expires_at: now + ttl, + signature: Box::new([0; 64]), }; - node_info.signature = network.sign_tl(&node_info).to_vec().into(); + *node_info.signature = network.sign_tl(&node_info); *self.node_info.lock().unwrap() = Some(node_info); } async fn announce_local_node_info(&self, network: &Network, ttl: u32) -> Result<()> { - let value = { - let created_at = now_sec(); - let expires_at = created_at + ttl; - - self.make_signed_value( - network, - "addr", - expires_at, - AddressList { - items: vec![network.local_addr().into()], - created_at, - expires_at, - }, - ) - }; + let data = tl_proto::serialize(&[network.local_addr().into()] as &[Address]); + + let mut value = self.make_unsigned_peer_value(b"addr", &data, now_sec() + ttl); + let signature = network.sign_tl(&value); + value.signature = &signature; - self.store_value(network, value).await + self.store_value(network, ValueRef::Peer(value)).await } async fn find_more_dht_nodes(&self, network: &Network) { @@ -373,7 +479,7 @@ impl DhtInner { } } - async fn find_value(&self, network: &Network, key_hash: &[u8; 32]) -> Option> { + async fn find_value(&self, network: &Network, key_hash: &[u8; 32]) -> Option> { // TODO: deduplicate shared futures let query = Query::new( network.clone(), @@ -386,7 +492,7 @@ impl DhtInner { query.find_value().await } - async fn store_value(&self, network: &Network, value: Box) -> Result<()> { + async fn store_value(&self, network: &Network, value: ValueRef<'_>) -> Result<()> { self.storage.insert(&value)?; let query = StoreValue::new( @@ -401,70 +507,54 @@ impl DhtInner { Ok(()) } - fn add_node_info(&self, network: &Network, node: Arc) -> Result { + fn add_node_info(&self, network: &Network, node: Arc) -> Result { anyhow::ensure!(node.is_valid(now_sec()), "invalid peer node info"); - // TODO: add support for multiple addresses - let peer_info = match node.address_list.items.first() { - Some(address) if node.id != self.local_id => PeerInfo { - peer_id: node.id, - affinity: PeerAffinity::Allowed, - address: address.clone(), - }, - _ => return Ok(false), - }; + if node.id == self.local_id { + return Ok(false); + } let mut routing_table = self.routing_table.lock().unwrap(); - let is_new = routing_table.add(node, self.max_k, &self.node_ttl); + let is_new = routing_table.add(node.clone(), self.max_k, &self.node_ttl); if is_new { - network.known_peers().insert(peer_info); + network.known_peers().insert(node, PeerAffinity::Allowed); } Ok(is_new) } - fn make_signed_value( - &self, - network: &Network, - name: &str, + fn make_unsigned_peer_value<'a>( + &'a self, + name: &'a [u8], + data: &'a 
[u8], expires_at: u32, - data: T, - ) -> Box - where - T: TlWrite + 'static, - { - let mut value = dht::SignedValue { - key: dht::SignedKey { - name: name.to_owned().into(), + ) -> PeerValueRef<'a> { + PeerValueRef { + key: PeerValueKeyRef { + name, idx: 0, - peer_id: self.local_id, - }, - data: match castaway::cast!(data, Bytes) { - Ok(data) => data, - Err(data) => tl_proto::serialize(data).into(), + peer_id: &self.local_id, }, + data, expires_at, - signature: Default::default(), - }; - value.signature = network.sign_tl(&value).to_vec().into(); - - Box::new(dht::Value::Signed(value)) + signature: &[0; 64], + } } - fn handle_store(&self, req: &dht::rpc::Store) -> Result { + fn handle_store(&self, req: &rpc::StoreRef<'_>) -> Result { self.storage.insert(&req.value) } - fn handle_find_node(&self, req: &dht::rpc::FindNode) -> NodeResponseRaw { + fn handle_find_node(&self, req: &rpc::FindNode) -> NodeResponse { let nodes = self .routing_table .lock() .unwrap() .closest(&req.key, (req.k as usize).min(self.max_k)); - NodeResponseRaw { nodes } + NodeResponse { nodes } } - fn handle_find_value(&self, req: &dht::rpc::FindValue) -> ValueResponseRaw { + fn handle_find_value(&self, req: &rpc::FindValue) -> ValueResponseRaw { if let Some(value) = self.storage.get(&req.key) { ValueResponseRaw::Found(value) } else { @@ -478,56 +568,15 @@ impl DhtInner { } } - fn handle_get_node_info(&self) -> Option { + fn handle_get_node_info(&self) -> Option { self.node_info .lock() .unwrap() .clone() - .map(|info| dht::NodeInfoResponse { info }) - } -} - -enum ValueResponseRaw { - Found(Bytes), - NotFound(Vec>), -} - -impl TlWrite for ValueResponseRaw { - type Repr = tl_proto::Boxed; - - fn max_size_hint(&self) -> usize { - 4 + match self { - Self::Found(value) => value.max_size_hint(), - Self::NotFound(nodes) => nodes.max_size_hint(), - } - } - - fn write_to
<P>
(&self, packet: &mut P) - where - P: tl_proto::TlPacket, - { - const FOUND_TL_ID: u32 = tl_proto::id!("dht.valueFound", scheme = "proto.tl"); - const NOT_FOUND_TL_ID: u32 = tl_proto::id!("dht.valueNotFound", scheme = "proto.tl"); - - match self { - Self::Found(value) => { - packet.write_u32(FOUND_TL_ID); - packet.write_raw_slice(&value); - } - Self::NotFound(nodes) => { - packet.write_u32(NOT_FOUND_TL_ID); - nodes.write_to(packet); - } - } + .map(|info| NodeInfoResponse { info }) } } -#[derive(TlWrite)] -#[tl(boxed, id = "dht.nodesFound", scheme = "proto.tl")] -struct NodeResponseRaw { - nodes: Vec>, -} - pub fn xor_distance(left: &PeerId, right: &PeerId) -> usize { for (i, (left, right)) in std::iter::zip(left.0.chunks(8), right.0.chunks(8)).enumerate() { let left = u64::from_be_bytes(left.try_into().unwrap()); diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index c72c1bf77..208580069 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -13,8 +13,8 @@ use tycho_util::{FastHashMap, FastHashSet}; use crate::dht::routing::RoutingTable; use crate::network::Network; -use crate::proto::dht; -use crate::types::{PeerId, Request}; +use crate::proto::dht::{rpc, NodeResponse, Value, ValueRef, ValueResponse}; +use crate::types::{PeerId, PeerInfo, Request}; use crate::util::NetworkExt; pub struct Query { @@ -47,9 +47,9 @@ impl Query { } #[tracing::instrument(level = "debug", skip_all)] - pub async fn find_value(mut self) -> Option> { + pub async fn find_value(mut self) -> Option> { // Prepare shared request - let request_body = Bytes::from(tl_proto::serialize(dht::rpc::FindValue { + let request_body = Bytes::from(tl_proto::serialize(rpc::FindValue { key: *self.local_id(), k: self.max_k as u32, })); @@ -59,7 +59,7 @@ impl Query { let mut futures = FuturesUnordered::new(); self.candidates .visit_closest(self.local_id(), self.max_k, |node| { - futures.push(Self::visit::( + futures.push(Self::visit::( self.network.clone(), node.clone(), request_body.clone(), @@ -72,7 +72,7 @@ impl Query { while let Some((node, res)) = futures.next().await { match res { // Return the value if found - Some(Ok(dht::ValueResponse::Found(value))) => { + Some(Ok(ValueResponse::Found(value))) => { if !value.is_valid(now_sec(), self.local_id()) { // Ignore invalid values continue; @@ -81,7 +81,7 @@ impl Query { return Some(value); } // Refill futures from the nodes response - Some(Ok(dht::ValueResponse::NotFound(nodes))) => { + Some(Ok(ValueResponse::NotFound(nodes))) => { tracing::debug!(peer_id = %node.id, count = nodes.len(), "received nodes"); if !self.update_candidates(now_sec(), self.max_k, nodes, &mut visited) { // Do nothing if candidates were not changed @@ -95,7 +95,7 @@ impl Query { // Skip already visited nodes return; } - futures.push(Self::visit::( + futures.push(Self::visit::( self.network.clone(), node.clone(), request_body.clone(), @@ -119,9 +119,9 @@ impl Query { } #[tracing::instrument(level = "debug", skip_all)] - pub async fn find_peers(mut self) -> impl Iterator> { + pub async fn find_peers(mut self) -> impl Iterator> { // Prepare shared request - let request_body = Bytes::from(tl_proto::serialize(dht::rpc::FindNode { + let request_body = Bytes::from(tl_proto::serialize(rpc::FindNode { key: *self.local_id(), k: self.max_k as u32, })); @@ -131,7 +131,7 @@ impl Query { let mut futures = FuturesUnordered::new(); self.candidates .visit_closest(self.local_id(), self.max_k, |node| { - futures.push(Self::visit::( + futures.push(Self::visit::( self.network.clone(), node.clone(), 
request_body.clone(), @@ -140,11 +140,11 @@ impl Query { }); // Process responses and refill futures until all peers are traversed - let mut result = FastHashMap::>::new(); + let mut result = FastHashMap::>::new(); while let Some((node, res)) = futures.next().await { match res { // Refill futures from the nodes response - Some(Ok(dht::NodeResponse { nodes })) => { + Some(Ok(NodeResponse { nodes })) => { tracing::debug!(peer_id = %node.id, count = nodes.len(), "received nodes"); if !self.update_candidates_full(now_sec(), self.max_k, nodes, &mut result) { // Do nothing if candidates were not changed @@ -158,7 +158,7 @@ impl Query { // Skip already visited nodes return; } - futures.push(Self::visit::( + futures.push(Self::visit::( self.network.clone(), node.clone(), request_body.clone(), @@ -185,7 +185,7 @@ impl Query { &mut self, now: u32, max_k: usize, - nodes: Vec, + nodes: Vec>, visited: &mut FastHashSet, ) -> bool { let mut has_new = false; @@ -197,7 +197,7 @@ impl Query { // Insert a new entry if visited.insert(node.id) { - self.candidates.add(Arc::new(node), max_k, &Duration::MAX); + self.candidates.add(node, max_k, &Duration::MAX); has_new = true; } } @@ -209,8 +209,8 @@ impl Query { &mut self, now: u32, max_k: usize, - nodes: Vec, - visited: &mut FastHashMap>, + nodes: Vec>, + visited: &mut FastHashMap>, ) -> bool { let mut has_new = false; for node in nodes { @@ -222,14 +222,14 @@ impl Query { match visited.entry(node.id) { // Insert a new entry hash_map::Entry::Vacant(entry) => { - let node = entry.insert(Arc::new(node)).clone(); + let node = entry.insert(node).clone(); self.candidates.add(node, max_k, &Duration::MAX); has_new = true; } // Try to replace an old entry hash_map::Entry::Occupied(mut entry) => { if entry.get().created_at < node.created_at { - *entry.get_mut() = Arc::new(node); + *entry.get_mut() = node; } } } @@ -240,10 +240,10 @@ impl Query { async fn visit( network: Network, - node: Arc, + node: Arc, request_body: Bytes, semaphore: &Semaphore, - ) -> (Arc, Option>) + ) -> (Arc, Option>) where for<'a> T: tl_proto::TlRead<'a, Repr = tl_proto::Boxed>, { @@ -278,15 +278,15 @@ impl StoreValue<()> { pub fn new( network: Network, routing_table: &RoutingTable, - value: Box, + value: ValueRef<'_>, max_k: usize, - ) -> StoreValue, Option>)> + Send> { - let key_hash = match value.as_ref() { - dht::Value::Signed(value) => tl_proto::hash(&value.key), - dht::Value::Overlay(value) => tl_proto::hash(&value.key), + ) -> StoreValue, Option>)> + Send> { + let key_hash = match &value { + ValueRef::Peer(value) => tl_proto::hash(&value.key), + ValueRef::Overlay(value) => tl_proto::hash(&value.key), }; - let request_body = Bytes::from(tl_proto::serialize(dht::rpc::Store { value })); + let request_body = Bytes::from(tl_proto::serialize(rpc::StoreRef { value })); let semaphore = Arc::new(Semaphore::new(10)); let futures = futures_util::stream::FuturesUnordered::new(); @@ -304,10 +304,10 @@ impl StoreValue<()> { async fn visit( network: Network, - node: Arc, + node: Arc, request_body: Bytes, semaphore: Arc, - ) -> (Arc, Option>) { + ) -> (Arc, Option>) { let Ok(_permit) = semaphore.acquire().await else { return (node, None); }; @@ -329,7 +329,7 @@ impl StoreValue<()> { } } -impl, Option>)> + Send> StoreValue { +impl, Option>)> + Send> StoreValue { #[tracing::instrument(level = "debug", skip_all, name = "store_value")] pub async fn run(mut self) { while let Some((node, res)) = self.futures.next().await { diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index 
93da6a6a1..1f584bc68 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -3,8 +3,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use crate::dht::{xor_distance, MAX_XOR_DISTANCE}; -use crate::proto::dht; -use crate::types::PeerId; +use crate::types::{PeerId, PeerInfo}; pub(crate) struct RoutingTable { local_id: PeerId, @@ -33,7 +32,7 @@ impl RoutingTable { self.buckets.values().map(|bucket| bucket.nodes.len()).sum() } - pub fn add(&mut self, node: Arc, max_k: usize, node_ttl: &Duration) -> bool { + pub fn add(&mut self, node: Arc, max_k: usize, node_ttl: &Duration) -> bool { let distance = xor_distance(&self.local_id, &node.id); if distance == 0 { return false; @@ -55,7 +54,7 @@ impl RoutingTable { } } - pub fn closest(&self, key: &[u8; 32], count: usize) -> Vec> { + pub fn closest(&self, key: &[u8; 32], count: usize) -> Vec> { if count == 0 { return Vec::new(); } @@ -83,7 +82,7 @@ impl RoutingTable { pub fn visit_closest(&self, key: &[u8; 32], count: usize, mut f: F) where - F: FnMut(&Arc), + F: FnMut(&Arc), { if count == 0 { return; @@ -130,7 +129,7 @@ impl Bucket { } } - fn insert(&mut self, node: Arc, max_k: usize, timeout: &Duration) -> bool { + fn insert(&mut self, node: Arc, max_k: usize, timeout: &Duration) -> bool { if let Some(index) = self .nodes .iter_mut() @@ -168,12 +167,12 @@ impl Bucket { } struct Node { - data: Arc, + data: Arc, last_updated_at: Instant, } impl Node { - fn new(data: Arc) -> Self { + fn new(data: Arc) -> Self { Self { data, last_updated_at: Instant::now(), @@ -189,22 +188,17 @@ impl Node { mod tests { use std::str::FromStr; - use crate::AddressList; - use super::*; const MAX_K: usize = 20; - fn make_node(id: PeerId) -> Arc { - Arc::new(dht::NodeInfo { + fn make_node(id: PeerId) -> Arc { + Arc::new(PeerInfo { id, - address_list: AddressList { - items: Default::default(), - created_at: 0, - expires_at: 0, - }, + address_list: Default::default(), created_at: 0, - signature: Default::default(), + expires_at: 0, + signature: Box::new([0; 64]), }) } diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index 0452260ae..3b4cfb198 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs @@ -1,3 +1,4 @@ +use std::cell::RefCell; use std::sync::{Arc, Weak}; use std::time::Duration; @@ -8,37 +9,27 @@ use moka::Expiry; use tl_proto::TlWrite; use tycho_util::time::now_sec; -use crate::proto; +use crate::proto::dht::{OverlayValue, OverlayValueRef, PeerValueRef, ValueRef}; type DhtCache = Cache; type DhtCacheBuilder = CacheBuilder>; pub trait OverlayValueMerger: Send + Sync + 'static { - fn check_value(&self, new: &proto::dht::OverlayValue) -> Result<(), StorageError>; - - fn merge_value( - &self, - new: &proto::dht::OverlayValue, - stored: &mut proto::dht::OverlayValue, - ) -> bool; + fn check_value(&self, new: &OverlayValueRef<'_>) -> Result<(), StorageError>; + fn merge_value(&self, new: &OverlayValueRef<'_>, stored: &mut OverlayValue) -> bool; } impl OverlayValueMerger for () { - fn check_value(&self, _new: &proto::dht::OverlayValue) -> Result<(), StorageError> { + fn check_value(&self, _new: &OverlayValueRef<'_>) -> Result<(), StorageError> { Err(StorageError::InvalidKey) } - - fn merge_value( - &self, - _new: &proto::dht::OverlayValue, - _stored: &mut proto::dht::OverlayValue, - ) -> bool { + fn merge_value(&self, _new: &OverlayValueRef<'_>, _stored: &mut OverlayValue) -> bool { false } } pub(crate) struct StorageBuilder { - cache_builder: DhtCacheBuilder, + cache_builder: DhtCacheBuilder, 
overlay_value_merger: Weak, max_ttl: Duration, max_key_name_len: usize, @@ -129,7 +120,7 @@ impl Storage { (stored_value.expires_at > now_sec()).then_some(stored_value.data) } - pub fn insert(&self, value: &proto::dht::Value) -> Result { + pub fn insert(&self, value: &ValueRef<'_>) -> Result { match value.expires_at().checked_sub(now_sec()) { Some(0) | None => return Err(StorageError::ValueExpired), Some(remaining_ttl) if remaining_ttl > self.max_ttl_sec => { @@ -145,12 +136,12 @@ impl Storage { } match value { - proto::dht::Value::Signed(value) => self.insert_signed_value(value), - proto::dht::Value::Overlay(value) => self.insert_overlay_value(value), + ValueRef::Peer(value) => self.insert_signed_value(value), + ValueRef::Overlay(value) => self.insert_overlay_value(value), } } - fn insert_signed_value(&self, value: &proto::dht::SignedValue) -> Result { + fn insert_signed_value(&self, value: &PeerValueRef<'_>) -> Result { let Some(public_key) = value.key.peer_id.as_public_key() else { return Err(StorageError::InvalidSignature); }; @@ -172,17 +163,28 @@ impl Storage { .is_fresh()) } - fn insert_overlay_value(&self, value: &proto::dht::OverlayValue) -> Result { - use std::borrow::Cow; - use std::cell::RefCell; - + fn insert_overlay_value(&self, value: &OverlayValueRef<'_>) -> Result { let Some(merger) = self.overlay_value_merger.upgrade() else { return Ok(false); }; merger.check_value(value)?; - let new_value = RefCell::new(Cow::Borrowed(value)); + enum OverlayValueCow<'a, 'b> { + Borrowed(&'a OverlayValueRef<'b>), + Owned(OverlayValue), + } + + impl OverlayValueCow<'_, '_> { + fn make_stored_value(&self) -> StoredValue { + match self { + Self::Borrowed(value) => StoredValue::new(*value, value.expires_at), + Self::Owned(value) => StoredValue::new(value, value.expires_at), + } + } + } + + let new_value = RefCell::new(OverlayValueCow::Borrowed(value)); Ok(self .cache @@ -190,18 +192,16 @@ impl Storage { .or_insert_with_if( || { let value = new_value.borrow(); - StoredValue::new(value.as_ref(), value.expires_at) + value.make_stored_value() }, |prev| { - let Ok(mut prev) = - tl_proto::deserialize::(&prev.data) - else { + let Ok(mut prev) = tl_proto::deserialize::(&prev.data) else { // Invalid values are always replaced with new values return true; }; if merger.merge_value(value, &mut prev) { - *new_value.borrow_mut() = Cow::Owned(prev); + *new_value.borrow_mut() = OverlayValueCow::Owned(prev); true } else { false diff --git a/network/src/lib.rs b/network/src/lib.rs index f027fdd53..fe4689e40 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,17 +1,17 @@ pub use self::util::{check_peer_signature, NetworkExt, Routable, Router, RouterBuilder}; pub use dht::{ - xor_distance, DhtClient, DhtClientBuilder, DhtConfig, DhtService, DhtServiceBuilder, - FindValueError, OverlayValueMerger, StorageError, + xor_distance, DhtClient, DhtClientBuilder, DhtConfig, DhtQueryBuilder, DhtQueryWithDataBuilder, + DhtService, DhtServiceBuilder, FindValueError, OverlayValueMerger, StorageError, }; pub use network::{ - ActivePeers, Connection, KnownPeers, Network, NetworkBuilder, NetworkConfig, Peer, QuicConfig, - RecvStream, SendStream, WeakActivePeers, WeakNetwork, + ActivePeers, Connection, KnownPeer, KnownPeers, Network, NetworkBuilder, NetworkConfig, Peer, + QuicConfig, RecvStream, SendStream, WeakActivePeers, WeakNetwork, }; pub use types::{ - service_datagram_fn, service_message_fn, service_query_fn, Address, AddressList, - BoxCloneService, BoxService, Direction, DisconnectReason, InboundRequestMeta, 
PeerAffinity, - PeerEvent, PeerId, PeerInfo, Request, Response, RpcQuery, Service, ServiceDatagramFn, - ServiceExt, ServiceMessageFn, ServiceQueryFn, ServiceRequest, Version, + service_datagram_fn, service_message_fn, service_query_fn, Address, BoxCloneService, + BoxService, Direction, DisconnectReason, InboundRequestMeta, PeerAffinity, PeerEvent, PeerId, + PeerInfo, Request, Response, RpcQuery, Service, ServiceDatagramFn, ServiceExt, + ServiceMessageFn, ServiceQueryFn, ServiceRequest, Version, }; pub use quinn; diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index e18dd1808..df9692430 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -180,24 +180,35 @@ impl ConnectionManager { .0 .iter() .filter(|item| { - let peer_info = item.value(); - peer_info.affinity == PeerAffinity::High - && &peer_info.peer_id != self.endpoint.peer_id() - && !self.active_peers.contains(&peer_info.peer_id) - && !self.pending_dials.contains_key(&peer_info.peer_id) + let KnownPeer { + peer_info, + affinity, + } = item.value(); + + *affinity == PeerAffinity::High + && &peer_info.id != self.endpoint.peer_id() + && !self.active_peers.contains(&peer_info.id) + && !self.pending_dials.contains_key(&peer_info.id) && self .dial_backoff_states - .get(&peer_info.peer_id) + .get(&peer_info.id) .map_or(true, |state| now > state.next_attempt_at) }) .take(outstanding_connections_limit) - .map(|item| item.value().clone()) + .map(|item| item.value().peer_info.clone()) .collect::>(); for peer_info in outstanding_connections { + // TODO: handle multiple addresses + let address = peer_info + .iter_addresses() + .next() + .cloned() + .expect("address list must have at least one item"); + let (tx, rx) = oneshot::channel(); - self.dial_peer(peer_info.address, Some(peer_info.peer_id), tx); - self.pending_dials.insert(peer_info.peer_id, rx); + self.dial_peer(address, Some(peer_info.id), tx); + self.pending_dials.insert(peer_info.id, rx); } } @@ -220,15 +231,9 @@ impl ConnectionManager { let fut = async { let connection = connecting.await?; - match known_peers.get(connection.peer_id()) { - Some(PeerInfo { - affinity: PeerAffinity::High | PeerAffinity::Allowed, - .. - }) => {} - Some(PeerInfo { - affinity: PeerAffinity::Never, - .. 
- }) => { + match known_peers.get_affinity(connection.peer_id()) { + Some(PeerAffinity::High | PeerAffinity::Allowed) => {} + Some(PeerAffinity::Never) => { anyhow::bail!( "rejecting connection from peer {} due to PeerAffinity::Never", connection.peer_id(), @@ -556,7 +561,7 @@ fn simultaneous_dial_tie_breaking( } #[derive(Default, Clone)] -pub struct KnownPeers(Arc>); +pub struct KnownPeers(Arc>); impl KnownPeers { pub fn new() -> Self { @@ -567,21 +572,31 @@ impl KnownPeers { self.0.contains_key(peer_id) } - pub fn get(&self, peer_id: &PeerId) -> Option { + pub fn get(&self, peer_id: &PeerId) -> Option { self.0.get(peer_id).map(|item| item.value().clone()) } - pub fn insert(&self, peer_info: PeerInfo) -> Option { - self.0.insert(peer_info.peer_id, peer_info) + pub fn get_affinity(&self, peer_id: &PeerId) -> Option { + self.0.get(peer_id).map(|item| item.value().affinity) } - pub fn remove(&self, peer_id: &PeerId) -> Option { - self.0.remove(peer_id).map(|(_, value)| value) + pub fn insert(&self, peer_info: Arc, affinity: PeerAffinity) -> Option { + self.0.insert( + peer_info.id, + KnownPeer { + peer_info, + affinity, + }, + ) } - pub fn print_all(&self) { - for item in self.0.iter() { - println!("{}: {:?}", item.peer_id, item.value()); - } + pub fn remove(&self, peer_id: &PeerId) -> Option { + self.0.remove(peer_id).map(|(_, value)| value) } } + +#[derive(Debug, Clone)] +pub struct KnownPeer { + pub peer_info: Arc, + pub affinity: PeerAffinity, +} diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index 1d2175b62..f0274cfee 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -15,7 +15,7 @@ use crate::types::{ pub use self::config::{NetworkConfig, QuicConfig}; pub use self::connection::{Connection, RecvStream, SendStream}; -pub use self::connection_manager::{ActivePeers, KnownPeers, WeakActivePeers}; +pub use self::connection_manager::{ActivePeers, KnownPeer, KnownPeers, WeakActivePeers}; pub use self::peer::Peer; mod config; diff --git a/network/src/proto.tl b/network/src/proto.tl index 73bfa9cd0..f139d5dab 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -14,17 +14,6 @@ transport.peerId key:int256 = transport.PeerId; transport.address.ipv4 ip:int port:int = transport.Address; transport.address.ipv6 ip:int128 port:int = transport.Address; -/** -* @param items multiple possible addresses for the same peer -* @param created_at unix timestamp when the list was generated -* @param expires_at unix timestamp up to which this list is valid -*/ -transport.addressList - items:(vector transport.Address) - created_at:int - expires_at:int - = transport.AddressList; - // DHT //////////////////////////////////////////////////////////////////////////////// @@ -32,14 +21,16 @@ transport.addressList /** * @param id node public key -* @param addr_list list of possible peer addresses -* @param created_at unix timestamp when the entry was generated -* @param signature a ed25519 signature of the entry +* @param addr_list multiple possible addresses for the same peer +* @param created_at unix timestamp when the info was generated +* @param expires_at unix timestamp up to which the info is valid +* @param signature a ed25519 signature of the info */ dht.node id:transport.PeerId - addr_list:transport.addressList + addr_list:(vector transport.Address) created_at:int + expires_at:int signature:bytes = dht.Node; @@ -56,7 +47,7 @@ dht.nodes nodes:(vector dht.node) = dht.Nodes; * @param idx key index used for versioning * @param peer_id owner id */ 
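A hedged usage sketch (a fragment, not a complete program): it assumes the crate's `PeerValueKeyRef` type and the `tl_proto::hash` call used elsewhere in this patch, plus a `peer_id: PeerId` already in scope. Both lookups and storage address a peer value by the TL hash of its boxed key, so finding the "addr" record of a peer boils down to:

// Sketch only: the key-hashing step used by find_peer_value and storage lookups.
let key_hash: [u8; 32] = tl_proto::hash(PeerValueKeyRef {
    name: b"addr", // key name as UTF-8 bytes
    idx: 0,        // key index used for versioning
    peer_id: &peer_id,
});
// `key_hash` is the 32-byte id that dht.findValue queries and storage lookups use.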
-dht.signedKey +dht.peerValueKey name:bytes idx:int peer_id:transport.PeerId @@ -65,21 +56,25 @@ dht.signedKey /** * Key for the overlay-managed value * -* @param id overlay id -* @param name key name as UTF-8 string -* @param idx key index used for versioning +* @param name key name as UTF-8 string +* @param idx key index used for versioning +* @param overlay_id overlay id */ -dht.overlayKey id:int256 name:bytes idx:int = dht.Key; +dht.overlayValueKey + name:bytes + idx:int + overlay_id:int256 + = dht.Key; /** * A value with an exact owner * -* @param key signed key +* @param key peer value key * @param value any data * @param expires_at unix timestamp up to which this value is valid */ -dht.signedValue key:dht.signedKey data:bytes expires_at:int signature:bytes = dht.Value; +dht.peerValue key:dht.peerValueKey data:bytes expires_at:int signature:bytes = dht.Value; /** * An overlay-managed value @@ -88,7 +83,7 @@ dht.signedValue key:dht.signedKey data:bytes expires_at:int signature:bytes = dh * @param value any data * @param expires_at unix timestamp up to which this value is valid */ -dht.overlayValue key:dht.overlayKey data:bytes expires_at:int = dht.Value; +dht.overlayValue key:dht.overlayValueKey data:bytes expires_at:int = dht.Value; /** @@ -143,6 +138,6 @@ dht.findNode key:int256 k:int = dht.NodeResponse; */ dht.findValue key:int256 k:int = dht.ValueResponse; /** -* Requests a signed address list +* Requests a signed node info */ dht.getNodeInfo = dht.NodeInfoResponse; diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index 304034e5f..27e07c02d 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -1,121 +1,181 @@ +use std::sync::Arc; + use bytes::Bytes; use tl_proto::{TlRead, TlWrite}; -use crate::types::{AddressList, PeerId}; -use crate::util::check_peer_signature; - -/// A signed DHT node info. -#[derive(Debug, Clone, TlRead, TlWrite)] -pub struct NodeInfo { - /// Node public key. - pub id: PeerId, - /// A list of possible peer addresses. - pub address_list: AddressList, - /// Unix timestamp when the entry was generated. - pub created_at: u32, - /// A `ed25519` signature of this entry. - #[tl(signature)] - pub signature: Bytes, -} - -impl NodeInfo { - pub fn is_valid(&self, at: u32) -> bool { - const CLOCK_THRESHOLD: u32 = 1; - - self.created_at <= at + CLOCK_THRESHOLD - && self.address_list.created_at <= at + CLOCK_THRESHOLD - && self.address_list.expires_at >= at - && !self.address_list.items.is_empty() - && check_peer_signature(&self.id, &self.signature, self) - } -} - -pub trait WithValue: - TlWrite + for<'a> TlRead<'a, Repr = tl_proto::Boxed> -{ - type Value<'a>: TlWrite + TlRead<'a, Repr = tl_proto::Boxed>; - - fn parse_value(value: Box) -> tl_proto::TlResult>; -} +use crate::types::{PeerId, PeerInfo}; +use crate::util::{check_peer_signature, tl}; /// Key for values that can only be updated by the owner. +/// +/// See [`SignedValueKeyRef`] for the non-owned version of the struct. #[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] -#[tl(boxed, id = "dht.signedKey", scheme = "proto.tl")] -pub struct SignedKey { +#[tl(boxed, id = "dht.peerValueKey", scheme = "proto.tl")] +pub struct PeerValueKey { /// Key name. - pub name: Bytes, + #[tl(with = "tl_name_owned")] + pub name: Box<[u8]>, /// Key index (version). pub idx: u32, /// Public key of the owner. pub peer_id: PeerId, } -impl WithValue for SignedKey { - type Value<'a> = SignedValue; +/// Key for values that can only be updated by the owner. 
+/// +/// See [`SignedValueKey`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.peerValueKey", scheme = "proto.tl")] +pub struct PeerValueKeyRef<'tl> { + /// Key name. + #[tl(with = "tl_name_ref")] + pub name: &'tl [u8], + /// Key index (version). + pub idx: u32, + /// Public key of the owner. + pub peer_id: &'tl PeerId, +} - fn parse_value(value: Box) -> tl_proto::TlResult> { - match *value { - Value::Signed(value) => Ok(value), - Value::Overlay(_) => Err(tl_proto::TlError::UnknownConstructor), +impl PeerValueKeyRef<'_> { + pub fn as_owned(&self) -> PeerValueKey { + PeerValueKey { + name: Box::from(self.name), + idx: self.idx, + peer_id: *self.peer_id, } } } /// Key for overlay-managed values. +/// +/// See [`OverlayValueKeyRef`] for the non-owned version of the struct. #[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] -#[tl(boxed, id = "dht.overlayKey", scheme = "proto.tl")] -pub struct OverlayKey { - /// Overlay id. - pub id: [u8; 32], +#[tl(boxed, id = "dht.overlayValueKey", scheme = "proto.tl")] +pub struct OverlayValueKey { /// Key name. - pub name: Bytes, + #[tl(with = "tl_name_owned")] + pub name: Box<[u8]>, /// Key index (version). pub idx: u32, + /// Overlay id. + pub overlay_id: [u8; 32], } -impl WithValue for OverlayKey { - type Value<'a> = OverlayValue; +/// Key for overlay-managed values. +/// +/// See [`OverlayValueKey`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.overlayValueKey", scheme = "proto.tl")] +pub struct OverlayValueKeyRef<'tl> { + /// Key name. + #[tl(with = "tl_name_ref")] + pub name: &'tl [u8], + /// Key index (version). + pub idx: u32, + /// Overlay id. + pub overlay_id: &'tl [u8; 32], +} - fn parse_value(value: Box) -> tl_proto::TlResult> { - match *value { - Value::Signed(_) => Err(tl_proto::TlError::UnknownConstructor), - Value::Overlay(value) => Ok(value), +impl OverlayValueKeyRef<'_> { + pub fn as_owned(&self) -> OverlayValueKey { + OverlayValueKey { + name: Box::from(self.name), + idx: self.idx, + overlay_id: *self.overlay_id, } } } /// Value with a known owner. +/// +/// See [`PeerValueRef`] for the non-owned version of the struct. #[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] -#[tl(boxed, id = "dht.signedValue", scheme = "proto.tl")] -pub struct SignedValue { - /// Signed key. - pub key: SignedKey, +#[tl(boxed, id = "dht.peerValue", scheme = "proto.tl")] +pub struct PeerValue { + /// Peer value key. + pub key: PeerValueKey, /// Any data. - pub data: Bytes, + pub data: Box<[u8]>, /// Unix timestamp up to which this value is valid. pub expires_at: u32, /// A `ed25519` signature of this entry. - #[tl(signature)] - pub signature: Bytes, + #[tl(signature, with = "tl::signature_owned")] + pub signature: Box<[u8; 64]>, +} + +/// Value with a known owner. +/// +/// See [`PeerValue`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.peerValue", scheme = "proto.tl")] +pub struct PeerValueRef<'tl> { + /// Peer value key. + pub key: PeerValueKeyRef<'tl>, + /// Any data. + pub data: &'tl [u8], + /// Unix timestamp up to which this value is valid. + pub expires_at: u32, + /// A `ed25519` signature of this entry. 
+ #[tl(signature, with = "tl::signature_ref")] + pub signature: &'tl [u8; 64], +} + +impl PeerValueRef<'_> { + pub fn as_owned(&self) -> PeerValue { + PeerValue { + key: self.key.as_owned(), + data: Box::from(self.data), + expires_at: self.expires_at, + signature: Box::new(*self.signature), + } + } } /// Overlay-managed value. +/// +/// See [`OverlayValueRef`] for the non-owned version of the struct. #[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] #[tl(boxed, id = "dht.overlayValue", scheme = "proto.tl")] pub struct OverlayValue { /// Overlay key. - pub key: OverlayKey, + pub key: OverlayValueKey, /// Any data. - pub data: Bytes, + pub data: Box<[u8]>, /// Unix timestamp up to which this value is valid. pub expires_at: u32, } +/// Overlay-managed value. +/// +/// See [`OverlayValue`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.overlayValue", scheme = "proto.tl")] +pub struct OverlayValueRef<'tl> { + /// Overlay key. + pub key: OverlayValueKeyRef<'tl>, + /// Any data. + pub data: &'tl [u8], + /// Unix timestamp up to which this value is valid. + pub expires_at: u32, +} + +impl OverlayValueRef<'_> { + pub fn as_owned(&self) -> OverlayValue { + OverlayValue { + key: self.key.as_owned(), + data: Box::from(self.data), + expires_at: self.expires_at, + } + } +} + /// Stored value. +/// +/// See [`ValueRef`] for the non-owned version of the struct. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Value { /// Value with a known owner. - Signed(SignedValue), + Peer(PeerValue), /// Overlay-managed value. Overlay(OverlayValue), } @@ -123,7 +183,7 @@ pub enum Value { impl Value { pub fn is_valid(&self, at: u32, key_hash: &[u8; 32]) -> bool { match self { - Self::Signed(value) => { + Self::Peer(value) => { value.expires_at >= at && key_hash == &tl_proto::hash(&value.key) && check_peer_signature(&value.key.peer_id, &value.signature, value) @@ -136,21 +196,21 @@ impl Value { pub fn key_name(&self) -> &[u8] { match self { - Self::Signed(value) => value.key.name.as_ref(), + Self::Peer(value) => value.key.name.as_ref(), Self::Overlay(value) => value.key.name.as_ref(), } } pub const fn key_index(&self) -> u32 { match self { - Self::Signed(value) => value.key.idx, + Self::Peer(value) => value.key.idx, Self::Overlay(value) => value.key.idx, } } pub const fn expires_at(&self) -> u32 { match self { - Self::Signed(value) => value.expires_at, + Self::Peer(value) => value.expires_at, Self::Overlay(value) => value.expires_at, } } @@ -161,7 +221,7 @@ impl TlWrite for Value { fn max_size_hint(&self) -> usize { match self { - Self::Signed(value) => value.max_size_hint(), + Self::Peer(value) => value.max_size_hint(), Self::Overlay(value) => value.max_size_hint(), } } @@ -171,7 +231,7 @@ impl TlWrite for Value { P: tl_proto::TlPacket, { match self { - Self::Signed(value) => value.write_to(packet), + Self::Peer(value) => value.write_to(packet), Self::Overlay(value) => value.write_to(packet), } } @@ -184,19 +244,101 @@ impl<'a> TlRead<'a> for Value { let id = u32::read_from(packet, offset)?; *offset -= 4; match id { - SignedValue::TL_ID => SignedValue::read_from(packet, offset).map(Self::Signed), + PeerValue::TL_ID => PeerValue::read_from(packet, offset).map(Self::Peer), OverlayValue::TL_ID => OverlayValue::read_from(packet, offset).map(Self::Overlay), _ => Err(tl_proto::TlError::UnknownConstructor), } } } +/// Stored value. +/// +/// See [`Value`] for the owned version of the struct. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ValueRef<'tl> { + /// Value with a known owner. + Peer(PeerValueRef<'tl>), + /// Overlay-managed value. + Overlay(OverlayValueRef<'tl>), +} + +impl ValueRef<'_> { + pub fn is_valid(&self, at: u32, key_hash: &[u8; 32]) -> bool { + match self { + Self::Peer(value) => { + value.expires_at >= at + && key_hash == &tl_proto::hash(&value.key) + && check_peer_signature(value.key.peer_id, value.signature, value) + } + Self::Overlay(value) => { + value.expires_at >= at && key_hash == &tl_proto::hash(&value.key) + } + } + } + + pub fn key_name(&self) -> &[u8] { + match self { + Self::Peer(value) => value.key.name, + Self::Overlay(value) => value.key.name, + } + } + + pub const fn key_index(&self) -> u32 { + match self { + Self::Peer(value) => value.key.idx, + Self::Overlay(value) => value.key.idx, + } + } + + pub const fn expires_at(&self) -> u32 { + match self { + Self::Peer(value) => value.expires_at, + Self::Overlay(value) => value.expires_at, + } + } +} + +impl TlWrite for ValueRef<'_> { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + match self { + Self::Peer(value) => value.max_size_hint(), + Self::Overlay(value) => value.max_size_hint(), + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + match self { + Self::Peer(value) => value.write_to(packet), + Self::Overlay(value) => value.write_to(packet), + } + } +} + +impl<'a> TlRead<'a> for ValueRef<'a> { + type Repr = tl_proto::Boxed; + + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + let id = u32::read_from(packet, offset)?; + *offset -= 4; + match id { + PeerValue::TL_ID => PeerValueRef::read_from(packet, offset).map(Self::Peer), + OverlayValue::TL_ID => OverlayValueRef::read_from(packet, offset).map(Self::Overlay), + _ => Err(tl_proto::TlError::UnknownConstructor), + } + } +} + /// A response for the [`rpc::FindNode`] query. #[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = "dht.nodesFound", scheme = "proto.tl")] pub struct NodeResponse { /// List of nodes closest to the key. - pub nodes: Vec, + pub nodes: Vec>, } /// A response for the [`rpc::FindValue`] query. @@ -208,7 +350,44 @@ pub enum ValueResponse { Found(Box), /// List of nodes closest to the key. #[tl(id = "dht.valueNotFound")] - NotFound(Vec), + NotFound(Vec>), +} + +/// A response for the [`rpc::FindValue`] query. +#[derive(Debug, Clone)] +pub enum ValueResponseRaw { + Found(Bytes), + NotFound(Vec>), +} + +impl TlWrite for ValueResponseRaw { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + 4 + match self { + Self::Found(value) => value.max_size_hint(), + Self::NotFound(nodes) => nodes.max_size_hint(), + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + const FOUND_TL_ID: u32 = tl_proto::id!("dht.valueFound", scheme = "proto.tl"); + const NOT_FOUND_TL_ID: u32 = tl_proto::id!("dht.valueNotFound", scheme = "proto.tl"); + + match self { + Self::Found(value) => { + packet.write_u32(FOUND_TL_ID); + packet.write_raw_slice(value); + } + Self::NotFound(nodes) => { + packet.write_u32(NOT_FOUND_TL_ID); + nodes.write_to(packet); + } + } + } } /// A response for the [`rpc::GetNodeInfo`] query. @@ -216,13 +395,11 @@ pub enum ValueResponse { #[tl(boxed, id = "dht.nodeInfoFound", scheme = "proto.tl")] pub struct NodeInfoResponse { /// Signed node info. - pub info: NodeInfo, + pub info: PeerInfo, } /// DHT RPC models. pub mod rpc { - use crate::types::RpcQuery; - use super::*; /// Suggest a node to store that value. @@ -230,7 +407,15 @@ pub mod rpc { #[tl(boxed, id = "dht.store", scheme = "proto.tl")] pub struct Store { /// A value to store. - pub value: Box, + pub value: Value, + } + + /// Suggest a node to store that value. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.store", scheme = "proto.tl")] + pub struct StoreRef<'tl> { + /// A value to store. + pub value: ValueRef<'tl>, } /// Search for `k` closest nodes. @@ -245,10 +430,6 @@ pub mod rpc { pub k: u32, } - impl RpcQuery for FindNode { - type Response = NodeResponse; - } - /// Search for a value if stored or `k` closest nodes. /// /// See [`ValueResponse`]. @@ -261,18 +442,47 @@ pub mod rpc { pub k: u32, } - impl RpcQuery for FindValue { - type Response = ValueResponse; - } - /// Requests a signed address list from the node. /// /// See [`NodeInfoResponse`]. #[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = "dht.getNodeInfo", scheme = "proto.tl")] pub struct GetNodeInfo; +} + +mod tl_name_ref { + use super::*; + + #[inline] + pub fn size_hint(name: &[u8]) -> usize { + name.max_size_hint() + } + + #[inline] + pub fn write(name: &[u8], packet: &mut P) { + name.as_ref().write_to(packet); + } + + pub fn read<'a>(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult<&'a [u8]> { + <&tl_proto::BoundedBytes<128>>::read_from(packet, offset).map(|bytes| bytes.as_ref()) + } +} + +mod tl_name_owned { + use super::*; + + #[inline] + pub fn size_hint(name: &[u8]) -> usize { + name.max_size_hint() + } + + #[inline] + pub fn write(name: &[u8], packet: &mut P) { + name.as_ref().write_to(packet); + } - impl RpcQuery for GetNodeInfo { - type Response = NodeInfoResponse; + pub fn read(packet: &[u8], offset: &mut usize) -> tl_proto::TlResult> { + <&tl_proto::BoundedBytes<128>>::read_from(packet, offset) + .map(|bytes| Box::from(bytes.as_ref())) } } diff --git a/network/src/types/address.rs b/network/src/types/address.rs index 157ebb273..b954652a3 100644 --- a/network/src/types/address.rs +++ b/network/src/types/address.rs @@ -129,40 +129,5 @@ impl FromStr for Address { } } -#[derive(Debug, Clone, TlWrite, Eq, PartialEq)] -pub struct AddressList { - pub items: Vec
<Address>
, - pub created_at: u32, - pub expires_at: u32, -} - -impl AddressList { - pub const MAX_LEN: usize = 4; -} - -impl<'a> TlRead<'a> for AddressList { - type Repr = tl_proto::Bare; - - fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { - use tl_proto::TlError; - - let len = u32::read_from(packet, offset)? as usize; - if len == 0 || len > Self::MAX_LEN { - return Err(TlError::InvalidData); - } - - let mut items = Vec::with_capacity(len); - for _ in 0..len { - items.push(Address::read_from(packet, offset)?); - } - - Ok(Self { - items, - created_at: u32::read_from(packet, offset)?, - expires_at: u32::read_from(packet, offset)?, - }) - } -} - const ADDRESS_V4_TL_ID: u32 = tl_proto::id!("transport.address.ipv4", scheme = "proto.tl"); const ADDRESS_V6_TL_ID: u32 = tl_proto::id!("transport.address.ipv6", scheme = "proto.tl"); diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index f004aabb0..d494ed8fe 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -3,8 +3,9 @@ use std::sync::Arc; use bytes::Bytes; -pub use self::address::{Address, AddressList}; +pub use self::address::Address; pub use self::peer_id::{Direction, PeerId}; +pub use self::peer_info::{PeerAffinity, PeerInfo}; pub use self::rpc::RpcQuery; pub use self::service::{ service_datagram_fn, service_message_fn, service_query_fn, BoxCloneService, BoxService, @@ -13,6 +14,7 @@ pub use self::service::{ mod address; mod peer_id; +mod peer_info; mod rpc; mod service; @@ -114,21 +116,6 @@ pub struct InboundRequestMeta { pub remote_address: SocketAddr, } -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub enum PeerAffinity { - High, - Allowed, - Never, -} - -#[derive(Debug, Clone)] -pub struct PeerInfo { - pub peer_id: PeerId, - pub affinity: PeerAffinity, - // TODO: change to address list - pub address: Address, -} - #[derive(Debug, Clone, PartialEq, Eq)] pub enum PeerEvent { NewPeer(PeerId), diff --git a/network/src/types/peer_info.rs b/network/src/types/peer_info.rs new file mode 100644 index 000000000..5190c1644 --- /dev/null +++ b/network/src/types/peer_info.rs @@ -0,0 +1,80 @@ +use tl_proto::{TlRead, TlWrite}; + +use crate::types::{Address, PeerId}; +use crate::util::{check_peer_signature, tl}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub enum PeerAffinity { + High, + Allowed, + Never, +} + +/// A signed node info. +#[derive(Debug, Clone, TlRead, TlWrite)] +pub struct PeerInfo { + /// Node public key. + pub id: PeerId, + /// Multiple possible addresses for the same peer. + #[tl(with = "tl_address_list")] + pub address_list: Box<[Address]>, + /// Unix timestamp when the info was generated. + pub created_at: u32, + /// Unix timestamp up to which the info is valid. + pub expires_at: u32, + /// A `ed25519` signature of the info. 
+ #[tl(signature, with = "tl::signature_owned")] + pub signature: Box<[u8; 64]>, +} + +impl PeerInfo { + pub const MAX_ADDRESSES: usize = 4; + + pub fn is_valid(&self, at: u32) -> bool { + const CLOCK_THRESHOLD: u32 = 1; + + self.created_at <= at + CLOCK_THRESHOLD + && self.expires_at >= at + && !self.address_list.is_empty() + && check_peer_signature(&self.id, &self.signature, self) + } + + pub fn is_expired(&self, at: u32) -> bool { + self.expires_at < at + } + + pub fn iter_addresses(&self) -> std::slice::Iter<'_, Address> { + self.address_list.iter() + } +} + +mod tl_address_list { + use super::*; + + pub fn size_hint(address_list: &[Address]) -> usize { + 4 + address_list + .iter() + .map(Address::max_size_hint) + .sum::() + } + + pub fn write(address_list: &[Address], packet: &mut P) { + address_list.write_to(packet); + } + + pub fn read(packet: &[u8], offset: &mut usize) -> tl_proto::TlResult> { + use tl_proto::TlError; + + let len = u32::read_from(packet, offset)? as usize; + if len == 0 || len > PeerInfo::MAX_ADDRESSES { + return Err(TlError::InvalidData); + } + + let mut items = Vec::with_capacity(len); + for _ in 0..len { + items.push(Address::read_from(packet, offset)?); + } + + Ok(items.into_boxed_slice()) + } +} diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs index a57c6ab68..76eb5eba8 100644 --- a/network/src/util/mod.rs +++ b/network/src/util/mod.rs @@ -1,5 +1,3 @@ -use bytes::Bytes; - pub use self::router::{Routable, Router, RouterBuilder}; pub use self::traits::NetworkExt; @@ -8,6 +6,8 @@ use crate::types::PeerId; mod router; mod traits; +pub(crate) mod tl; + #[macro_export] macro_rules! match_tl_request { ($req_body:expr, { @@ -32,15 +32,12 @@ macro_rules! match_tl_request { }; } -pub fn check_peer_signature(peed_id: &PeerId, signature: &Bytes, data: &T) -> bool +pub fn check_peer_signature(peed_id: &PeerId, signature: &[u8; 64], data: &T) -> bool where T: tl_proto::TlWrite, { let Some(public_key) = peed_id.as_public_key() else { return false; }; - let Ok::<&[u8; 64], _>(signature) = signature.as_ref().try_into() else { - return false; - }; public_key.verify(data, signature) } diff --git a/network/src/util/tl.rs b/network/src/util/tl.rs new file mode 100644 index 000000000..ee01d9ea4 --- /dev/null +++ b/network/src/util/tl.rs @@ -0,0 +1,43 @@ +use tl_proto::{TlError, TlPacket, TlRead, TlResult, TlWrite}; + +pub mod signature_ref { + use super::*; + + #[inline] + pub fn size_hint(signature: &[u8; 64]) -> usize { + signature.as_slice().max_size_hint() + } + + #[inline] + pub fn write(signature: &[u8; 64], packet: &mut P) { + signature.as_slice().write_to(packet); + } + + pub fn read<'a>(packet: &'a [u8], offset: &mut usize) -> TlResult<&'a [u8; 64]> { + <&tl_proto::BoundedBytes<64>>::read_from(packet, offset) + .and_then(|bytes| bytes.as_ref().try_into().map_err(|_e| TlError::InvalidData)) + } +} + +pub mod signature_owned { + use super::*; + + #[inline] + pub fn size_hint(signature: &[u8; 64]) -> usize { + signature.as_slice().max_size_hint() + } + + #[inline] + pub fn write(signature: &[u8; 64], packet: &mut P) { + signature.as_slice().write_to(packet); + } + + pub fn read(packet: &[u8], offset: &mut usize) -> TlResult> { + <&tl_proto::BoundedBytes<64>>::read_from(packet, offset).and_then(|bytes| { + let Ok::<[u8; 64], _>(bytes) = bytes.as_ref().try_into() else { + return Err(TlError::InvalidData); + }; + Ok(Box::new(bytes)) + }) + } +} diff --git a/network/src/util/traits.rs b/network/src/util/traits.rs index a4d1d16f7..fb0a06384 100644 --- 
a/network/src/util/traits.rs +++ b/network/src/util/traits.rs @@ -2,7 +2,7 @@ use std::future::Future; use anyhow::Result; -use crate::network::{Network, Peer}; +use crate::network::{KnownPeer, Network, Peer}; use crate::types::{PeerEvent, PeerId, Request, Response}; pub trait NetworkExt { @@ -45,10 +45,15 @@ where match network.known_peers().get(peer_id) { // Initiate a connection of it is a known peer - Some(peer_info) => { - network - .connect_with_peer_id(peer_info.address, peer_id) - .await?; + Some(KnownPeer { peer_info, .. }) => { + // TODO: try multiple addresses + let address = peer_info + .iter_addresses() + .next() + .cloned() + .expect("address list must have at least one item"); + + network.connect_with_peer_id(address, peer_id).await?; } // Error otherwise None => anyhow::bail!("trying to interact with an unknown peer: {peer_id}"), diff --git a/network/tests/dht.rs b/network/tests/dht.rs index 2911d180c..04c6425ec 100644 --- a/network/tests/dht.rs +++ b/network/tests/dht.rs @@ -9,9 +9,8 @@ use std::sync::Arc; use anyhow::Result; use everscale_crypto::ed25519; use tl_proto::{TlRead, TlWrite}; -use tycho_network::proto::dht; use tycho_network::{ - Address, AddressList, DhtClient, DhtService, FindValueError, Network, PeerId, Router, + Address, DhtClient, DhtService, FindValueError, Network, PeerId, PeerInfo, Router, }; use tycho_util::time::now_sec; @@ -39,29 +38,24 @@ impl Node { Ok(Self { network, dht }) } - fn make_node_info(key: &ed25519::SecretKey, address: Address) -> dht::NodeInfo { - const TTL: u32 = 3600; - + fn make_peer_info(key: &ed25519::SecretKey, address: Address) -> PeerInfo { let keypair = ed25519::KeyPair::from(key); let peer_id = PeerId::from(keypair.public_key); let now = now_sec(); - let mut node_info = dht::NodeInfo { + let mut node_info = PeerInfo { id: peer_id, - address_list: AddressList { - items: vec![address], - created_at: now, - expires_at: now + TTL, - }, + address_list: vec![address].into_boxed_slice(), created_at: now, - signature: Default::default(), + expires_at: u32::MAX, + signature: Box::new([0; 64]), }; - node_info.signature = keypair.sign(&node_info).to_vec().into(); + *node_info.signature = keypair.sign(&node_info); node_info } } -fn make_network(node_count: usize) -> (Vec, Vec>) { +fn make_network(node_count: usize) -> (Vec, Vec>) { let keys = (0..node_count) .map(|_| ed25519::SecretKey::generate(&mut rand::thread_rng())) .collect::>(); @@ -73,7 +67,7 @@ fn make_network(node_count: usize) -> (Vec, Vec>) { .unwrap(); let bootstrap_info = std::iter::zip(&keys, &nodes) - .map(|(key, node)| Arc::new(Node::make_node_info(key, node.network.local_addr().into()))) + .map(|(key, node)| Arc::new(Node::make_peer_info(key, node.network.local_addr().into()))) .collect::>(); for node in &nodes { for info in &bootstrap_info { @@ -112,30 +106,31 @@ async fn bootstrap_nodes_store_value() -> Result<()> { #[derive(Debug, Clone, PartialEq, Eq, TlWrite, TlRead)] struct SomeValue(u32); + const VALUE: SomeValue = SomeValue(123123); + let (nodes, _) = make_network(5); // Store value let first = &nodes[0].dht; - let value_to_store = first.make_signed_value("test", now_sec() + 600, SomeValue(123123)); - first.store_value(value_to_store.clone()).await?; + + first + .entry(b"test") + .with_data(VALUE) + .with_time(now_sec()) + .store_as_peer() + .await?; // Retrieve an existing value - let queried_value = first - .find_value(&dht::SignedKey { - name: "test".to_owned().into(), - idx: 0, - peer_id: *first.network().peer_id(), - }) + let value = first + 
.entry(b"test") + .find_peer_value::(&first.network().peer_id()) .await?; - assert_eq!(&dht::Value::Signed(queried_value), value_to_store.as_ref()); + assert_eq!(value, VALUE); // Retrieve a non-existing value let res = first - .find_value(&dht::SignedKey { - name: "not-existing".to_owned().into(), - idx: 1, - peer_id: *first.network().peer_id(), - }) + .entry(b"non-existing") + .find_peer_value_raw(&first.network().peer_id()) .await; assert!(matches!(res, Err(FindValueError::NotFound))); diff --git a/util/src/lib.rs b/util/src/lib.rs index 54e4d47aa..1405a6117 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,10 +1,117 @@ use std::collections::HashMap; use std::collections::HashSet; -pub mod time; pub mod futures; +pub mod time; pub type FastDashMap = dashmap::DashMap; pub type FastDashSet = dashmap::DashSet; pub type FastHashMap = HashMap; pub type FastHashSet = HashSet; + +/// # Example +/// +/// ```rust +/// # use tycho_util::realloc_box_enum; +/// enum Value { +/// One(BigValue1), +/// Two(BigValue2), +/// } +/// +/// struct BigValue1([u32; 10]); +/// +/// struct BigValue2([u32; 7]); +/// +/// fn convert_to_one(value: Box) -> Option> { +/// realloc_box_enum!(value, { +/// Value::One(value) => Box::new(value) => Some(value), +/// _ => None, +/// }) +/// } +/// ``` +#[macro_export] +macro_rules! realloc_box_enum { + ($value:expr, { + $target_variant:pat => Box::new($extracted:ident) => $target:expr, + $other_variant:pat => $other:expr, + }) => {{ + let value: ::std::boxed::Box<_> = $value; + match ::core::convert::AsRef::as_ref(&value) { + #[allow(unused_variables)] + $target_variant => { + let $extracted = unsafe { + $crate::__internal::realloc_box(value, |value| match value { + $target_variant => $extracted, + _ => unreachable!(), + }) + }; + $target + } + $other_variant => $other, + } + }}; +} + +#[doc(hidden)] +pub mod __internal { + /// # Safety + /// The following must be true: + /// - `T` must have the same layout as `R` + /// - `f` must not panic + pub unsafe fn realloc_box(value: Box, f: F) -> Box + where + F: FnOnce(T) -> R, + { + assert!(std::mem::align_of::() == std::mem::align_of::()); + + let ptr = Box::into_raw(value); + let value = std::ptr::read(ptr); + + let ptr = std::alloc::realloc( + ptr.cast::(), + std::alloc::Layout::new::(), + std::mem::size_of::(), + ) + .cast::(); + + if ptr.is_null() { + std::alloc::handle_alloc_error(std::alloc::Layout::new::()); + } + + // NOTE: in case of panic, the memory will be leaked + std::ptr::write(ptr, f(value)); + + Box::from_raw(ptr) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[allow(dead_code)] + fn realloc_enum() { + enum Value { + One(BigValue1), + Two(BigValue2), + } + + #[derive(Clone)] + struct BigValue1([u32; 10]); + + #[derive(Clone)] + struct BigValue2([u32; 7]); + + fn convert_to_one(value: Box) -> Option> { + realloc_box_enum!(value, { + Value::One(value) => Box::new(value) => Some(value), + _ => None, + }) + } + + let value = BigValue1([123; 10]); + let one = convert_to_one(Box::new(Value::One(value.clone()))); + assert_eq!(one.unwrap().0, value.0); + } +} From 509b9e2c16ef6c6bf706f52ea0acc4f02be30ca7 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Thu, 15 Feb 2024 17:23:48 +0100 Subject: [PATCH 26/35] feat(network): Use typed key names --- network/src/dht/config.rs | 6 -- network/src/dht/mod.rs | 69 ++++++++------- network/src/dht/query.rs | 13 ++- network/src/dht/storage.rs | 25 ------ network/src/proto.tl | 19 +++-- network/src/proto/dht.rs | 105 +++++------------------ 
network/src/types/mod.rs | 146 +------------------------------- network/src/types/peer_event.rs | 40 +++++++++ network/src/types/request.rs | 104 +++++++++++++++++++++++ network/tests/dht.rs | 14 +-- 10 files changed, 235 insertions(+), 306 deletions(-) create mode 100644 network/src/types/peer_event.rs create mode 100644 network/src/types/request.rs diff --git a/network/src/dht/config.rs b/network/src/dht/config.rs index 97955a4d0..a1bd60bcf 100644 --- a/network/src/dht/config.rs +++ b/network/src/dht/config.rs @@ -9,10 +9,6 @@ pub struct DhtConfig { pub max_node_info_ttl: Duration, /// Maximum time to live for stored values. pub max_stored_value_ttl: Duration, - /// Maximum length of stored key names. - pub max_stored_key_name_len: usize, - /// Maximum index of stored keys. - pub max_stored_key_index: u32, /// Maximum storage capacity (number of entries). pub max_storage_capacity: u64, /// Time until a stored item is considered idle and can be removed. @@ -25,8 +21,6 @@ impl Default for DhtConfig { max_k: 6, max_node_info_ttl: Duration::from_secs(3600), max_stored_value_ttl: Duration::from_secs(3600), - max_stored_key_name_len: 128, - max_stored_key_index: 4, max_storage_capacity: 10000, storage_item_time_to_idle: None, } diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 7ad131fec..0644698ea 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -11,8 +11,8 @@ use self::routing::RoutingTable; use self::storage::Storage; use crate::network::{Network, WeakNetwork}; use crate::proto::dht::{ - rpc, NodeInfoResponse, NodeResponse, PeerValue, PeerValueKey, PeerValueKeyRef, PeerValueRef, - Value, ValueRef, ValueResponseRaw, + rpc, NodeInfoResponse, NodeResponse, PeerValue, PeerValueKey, PeerValueKeyName, + PeerValueKeyRef, PeerValueRef, Value, ValueRef, ValueResponseRaw, }; use crate::types::{ Address, PeerAffinity, PeerId, PeerInfo, Request, Response, Service, ServiceRequest, @@ -75,7 +75,7 @@ impl DhtClient { Ok(info) } - pub fn entry<'n>(&self, name: &'n [u8]) -> DhtQueryBuilder<'_, 'n> { + pub fn entry(&self, name: PeerValueKeyName) -> DhtQueryBuilder<'_> { DhtQueryBuilder { inner: &self.inner, network: &self.network, @@ -86,27 +86,26 @@ impl DhtClient { } #[derive(Clone, Copy)] -pub struct DhtQueryBuilder<'a, 'n> { +pub struct DhtQueryBuilder<'a> { inner: &'a DhtInner, network: &'a Network, - name: &'n [u8], + name: PeerValueKeyName, idx: u32, } -impl<'a, 'n> DhtQueryBuilder<'a, 'n> { +impl<'a> DhtQueryBuilder<'a> { #[inline] pub fn with_idx(&mut self, idx: u32) -> &mut Self { self.idx = idx; self } - pub async fn find_peer_value(&self, peer_id: &PeerId) -> Result + pub async fn find_value(&self, peer_id: &PeerId) -> Result where for<'tl> T: tl_proto::TlRead<'tl>, { let key_hash = tl_proto::hash(PeerValueKeyRef { name: self.name, - idx: self.idx, peer_id, }); @@ -129,7 +128,6 @@ impl<'a, 'n> DhtQueryBuilder<'a, 'n> { ) -> Result, FindValueError> { let key_hash = tl_proto::hash(PeerValueKeyRef { name: self.name, - idx: self.idx, peer_id, }); @@ -146,7 +144,7 @@ impl<'a, 'n> DhtQueryBuilder<'a, 'n> { } } - pub fn with_data(&self, data: T) -> DhtQueryWithDataBuilder<'a, 'n> + pub fn with_data(&self, data: T) -> DhtQueryWithDataBuilder<'a> where T: tl_proto::TlWrite, { @@ -154,21 +152,19 @@ impl<'a, 'n> DhtQueryBuilder<'a, 'n> { inner: *self, data: tl_proto::serialize(&data), at: None, - ttl: DhtQueryWithDataBuilder::DEFAULT_TTL, + ttl: DEFAULT_TTL, } } } -pub struct DhtQueryWithDataBuilder<'a, 'n> { - inner: DhtQueryBuilder<'a, 'n>, +pub struct 
DhtQueryWithDataBuilder<'a> { + inner: DhtQueryBuilder<'a>, data: Vec, at: Option, ttl: u32, } -impl DhtQueryWithDataBuilder<'_, '_> { - const DEFAULT_TTL: u32 = 3600; - +impl DhtQueryWithDataBuilder<'_> { pub fn with_time(&mut self, at: u32) -> &mut Self { self.at = Some(at); self @@ -179,14 +175,13 @@ impl DhtQueryWithDataBuilder<'_, '_> { self } - pub async fn store_as_peer(&self) -> Result<()> { + pub async fn store(&self) -> Result<()> { let dht = self.inner.inner; let network = self.inner.network; let mut value = PeerValueRef { key: PeerValueKeyRef { name: self.inner.name, - idx: self.inner.idx, peer_id: &dht.local_id, }, data: &self.data, @@ -199,14 +194,13 @@ impl DhtQueryWithDataBuilder<'_, '_> { dht.store_value(network, ValueRef::Peer(value)).await } - pub fn into_signed_peer_value(self) -> PeerValue { + pub fn into_signed_value(self) -> PeerValue { let dht = self.inner.inner; let network = self.inner.network; let mut value = PeerValue { key: PeerValueKey { - name: Box::from(self.inner.name), - idx: self.inner.idx, + name: self.name, peer_id: dht.local_id, }, data: self.data.into_boxed_slice(), @@ -218,6 +212,22 @@ impl DhtQueryWithDataBuilder<'_, '_> { } } +impl<'a> std::ops::Deref for DhtQueryWithDataBuilder<'a> { + type Target = DhtQueryBuilder<'a>; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl<'a> std::ops::DerefMut for DhtQueryWithDataBuilder<'a> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + pub struct DhtServiceBuilder { local_id: PeerId, config: Option, @@ -240,8 +250,6 @@ impl DhtServiceBuilder { let storage = { let mut builder = Storage::builder() - .with_max_key_name_len(config.max_stored_key_name_len) - .with_max_key_index(config.max_stored_key_index) .with_max_capacity(config.max_storage_capacity) .with_max_ttl(config.max_stored_value_ttl); @@ -382,7 +390,6 @@ struct DhtInner { impl DhtInner { fn start_background_tasks(self: &Arc, network: WeakNetwork) { - const INFO_TTL: u32 = 3600; const INFO_UPDATE_PERIOD: Duration = Duration::from_secs(60); const ANNOUNCE_PERIOD: Duration = Duration::from_secs(600); @@ -416,14 +423,14 @@ impl DhtInner { match action { Action::Refresh => { - this.refresh_local_node_info(&network, INFO_TTL); + this.refresh_local_node_info(&network, DEFAULT_TTL); } Action::Announce => { // Always refresh node info before announcing - this.refresh_local_node_info(&network, INFO_TTL); + this.refresh_local_node_info(&network, DEFAULT_TTL); refresh_interval.reset(); - if let Err(e) = this.announce_local_node_info(&network, INFO_TTL).await { + if let Err(e) = this.announce_local_node_info(&network, DEFAULT_TTL).await { tracing::error!("failed to announce local DHT node info: {e:?}"); } } @@ -454,7 +461,8 @@ impl DhtInner { async fn announce_local_node_info(&self, network: &Network, ttl: u32) -> Result<()> { let data = tl_proto::serialize(&[network.local_addr().into()] as &[Address]); - let mut value = self.make_unsigned_peer_value(b"addr", &data, now_sec() + ttl); + let mut value = + self.make_unsigned_peer_value(PeerValueKeyName::NodeInfo, &data, now_sec() + ttl); let signature = network.sign_tl(&value); value.signature = &signature; @@ -524,14 +532,13 @@ impl DhtInner { fn make_unsigned_peer_value<'a>( &'a self, - name: &'a [u8], + name: PeerValueKeyName, data: &'a [u8], expires_at: u32, ) -> PeerValueRef<'a> { PeerValueRef { key: PeerValueKeyRef { name, - idx: 0, peer_id: &self.local_id, }, data, @@ -599,3 +606,5 @@ pub enum FindValueError { #[error("value not found")] 
NotFound, } + +const DEFAULT_TTL: u32 = 3600; diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index 208580069..862f92f49 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -73,7 +73,10 @@ impl Query { match res { // Return the value if found Some(Ok(ValueResponse::Found(value))) => { - if !value.is_valid(now_sec(), self.local_id()) { + let is_valid = value.is_valid(now_sec(), self.local_id()); + tracing::debug!(peer_id = %node.id, is_valid, "found value"); + + if !is_valid { // Ignore invalid values continue; } @@ -82,8 +85,12 @@ impl Query { } // Refill futures from the nodes response Some(Ok(ValueResponse::NotFound(nodes))) => { - tracing::debug!(peer_id = %node.id, count = nodes.len(), "received nodes"); - if !self.update_candidates(now_sec(), self.max_k, nodes, &mut visited) { + let node_count = nodes.len(); + let has_new = + self.update_candidates(now_sec(), self.max_k, nodes, &mut visited); + tracing::debug!(peer_id = %node.id, count = node_count, has_new, "received nodes"); + + if !has_new { // Do nothing if candidates were not changed continue; } diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs index 3b4cfb198..76041b389 100644 --- a/network/src/dht/storage.rs +++ b/network/src/dht/storage.rs @@ -32,9 +32,6 @@ pub(crate) struct StorageBuilder { cache_builder: DhtCacheBuilder, overlay_value_merger: Weak, max_ttl: Duration, - max_key_name_len: usize, - max_key_index: u32, - // TODO: add a hashset for allowed keys (maybe separate signed keys from overlay keys) } impl Default for StorageBuilder { @@ -43,8 +40,6 @@ impl Default for StorageBuilder { cache_builder: Default::default(), overlay_value_merger: Weak::<()>::new(), max_ttl: Duration::from_secs(3600), - max_key_name_len: 128, - max_key_index: 4, } } } @@ -66,8 +61,6 @@ impl StorageBuilder { .build_with_hasher(ahash::RandomState::default()), overlay_value_merger: self.overlay_value_merger, max_ttl_sec: self.max_ttl.as_secs().try_into().unwrap_or(u32::MAX), - max_key_name_len: self.max_key_name_len, - max_key_index: self.max_key_index, } } @@ -76,16 +69,6 @@ impl StorageBuilder { self } - pub fn with_max_key_name_len(mut self, len: usize) -> Self { - self.max_key_name_len = len; - self - } - - pub fn with_max_key_index(mut self, index: u32) -> Self { - self.max_key_index = index; - self - } - pub fn with_max_capacity(mut self, max_capacity: u64) -> Self { self.cache_builder = self.cache_builder.max_capacity(max_capacity); self @@ -106,8 +89,6 @@ pub(crate) struct Storage { cache: DhtCache, overlay_value_merger: Weak, max_ttl_sec: u32, - max_key_name_len: usize, - max_key_index: u32, } impl Storage { @@ -129,12 +110,6 @@ impl Storage { _ => {} } - if !(0..=self.max_key_name_len).contains(&value.key_name().len()) - || value.key_index() > self.max_key_index - { - return Err(StorageError::InvalidKey); - } - match value { ValueRef::Peer(value) => self.insert_signed_value(value), ValueRef::Overlay(value) => self.insert_overlay_value(value), diff --git a/network/src/proto.tl b/network/src/proto.tl index f139d5dab..1aa05ffcd 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -43,29 +43,32 @@ dht.nodes nodes:(vector dht.node) = dht.Nodes; /** * Key for the value that can only be updated by an owner * -* @param name key name as UTF-8 string -* @param idx key index used for versioning +* @param name key name enum * @param peer_id owner id */ dht.peerValueKey - name:bytes - idx:int + name:dht.PeerValueKeyName peer_id:transport.PeerId = dht.Key; /** * Key for the 
overlay-managed value * -* @param name key name as UTF-8 string -* @param idx key index used for versioning +* @param name key name enum * @param overlay_id overlay id */ dht.overlayValueKey - name:bytes - idx:int + name:dht.OverlayValueKeyName overlay_id:int256 = dht.Key; +// Peer value key names { +dht.peerValueKeyName.nodeInfo = dht.PeerValueKeyName; +// } + +// Overlay value key names { +dht.overlayValueKeyName.peersList = dht.OverlayValueKeyName; +// } /** * A value with an exact owner diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index 27e07c02d..1a41c26d2 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -6,6 +6,20 @@ use tl_proto::{TlRead, TlWrite}; use crate::types::{PeerId, PeerInfo}; use crate::util::{check_peer_signature, tl}; +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, scheme = "proto.tl")] +pub enum PeerValueKeyName { + #[tl(id = "dht.peerValueKeyName.nodeInfo")] + NodeInfo, +} + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, scheme = "proto.tl")] +pub enum OverlayValueKeyName { + #[tl(id = "dht.overlayValueKeyName.peersList")] + PeersList, +} + /// Key for values that can only be updated by the owner. /// /// See [`SignedValueKeyRef`] for the non-owned version of the struct. @@ -13,10 +27,7 @@ use crate::util::{check_peer_signature, tl}; #[tl(boxed, id = "dht.peerValueKey", scheme = "proto.tl")] pub struct PeerValueKey { /// Key name. - #[tl(with = "tl_name_owned")] - pub name: Box<[u8]>, - /// Key index (version). - pub idx: u32, + pub name: PeerValueKeyName, /// Public key of the owner. pub peer_id: PeerId, } @@ -28,10 +39,7 @@ pub struct PeerValueKey { #[tl(boxed, id = "dht.peerValueKey", scheme = "proto.tl")] pub struct PeerValueKeyRef<'tl> { /// Key name. - #[tl(with = "tl_name_ref")] - pub name: &'tl [u8], - /// Key index (version). - pub idx: u32, + pub name: PeerValueKeyName, /// Public key of the owner. pub peer_id: &'tl PeerId, } @@ -39,8 +47,7 @@ pub struct PeerValueKeyRef<'tl> { impl PeerValueKeyRef<'_> { pub fn as_owned(&self) -> PeerValueKey { PeerValueKey { - name: Box::from(self.name), - idx: self.idx, + name: self.name, peer_id: *self.peer_id, } } @@ -53,10 +60,7 @@ impl PeerValueKeyRef<'_> { #[tl(boxed, id = "dht.overlayValueKey", scheme = "proto.tl")] pub struct OverlayValueKey { /// Key name. - #[tl(with = "tl_name_owned")] - pub name: Box<[u8]>, - /// Key index (version). - pub idx: u32, + pub name: OverlayValueKeyName, /// Overlay id. pub overlay_id: [u8; 32], } @@ -68,10 +72,7 @@ pub struct OverlayValueKey { #[tl(boxed, id = "dht.overlayValueKey", scheme = "proto.tl")] pub struct OverlayValueKeyRef<'tl> { /// Key name. - #[tl(with = "tl_name_ref")] - pub name: &'tl [u8], - /// Key index (version). - pub idx: u32, + pub name: OverlayValueKeyName, /// Overlay id. 
pub overlay_id: &'tl [u8; 32], } @@ -79,8 +80,7 @@ pub struct OverlayValueKeyRef<'tl> { impl OverlayValueKeyRef<'_> { pub fn as_owned(&self) -> OverlayValueKey { OverlayValueKey { - name: Box::from(self.name), - idx: self.idx, + name: self.name, overlay_id: *self.overlay_id, } } @@ -194,20 +194,6 @@ impl Value { } } - pub fn key_name(&self) -> &[u8] { - match self { - Self::Peer(value) => value.key.name.as_ref(), - Self::Overlay(value) => value.key.name.as_ref(), - } - } - - pub const fn key_index(&self) -> u32 { - match self { - Self::Peer(value) => value.key.idx, - Self::Overlay(value) => value.key.idx, - } - } - pub const fn expires_at(&self) -> u32 { match self { Self::Peer(value) => value.expires_at, @@ -276,20 +262,6 @@ impl ValueRef<'_> { } } - pub fn key_name(&self) -> &[u8] { - match self { - Self::Peer(value) => value.key.name, - Self::Overlay(value) => value.key.name, - } - } - - pub const fn key_index(&self) -> u32 { - match self { - Self::Peer(value) => value.key.idx, - Self::Overlay(value) => value.key.idx, - } - } - pub const fn expires_at(&self) -> u32 { match self { Self::Peer(value) => value.expires_at, @@ -449,40 +421,3 @@ pub mod rpc { #[tl(boxed, id = "dht.getNodeInfo", scheme = "proto.tl")] pub struct GetNodeInfo; } - -mod tl_name_ref { - use super::*; - - #[inline] - pub fn size_hint(name: &[u8]) -> usize { - name.max_size_hint() - } - - #[inline] - pub fn write(name: &[u8], packet: &mut P) { - name.as_ref().write_to(packet); - } - - pub fn read<'a>(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult<&'a [u8]> { - <&tl_proto::BoundedBytes<128>>::read_from(packet, offset).map(|bytes| bytes.as_ref()) - } -} - -mod tl_name_owned { - use super::*; - - #[inline] - pub fn size_hint(name: &[u8]) -> usize { - name.max_size_hint() - } - - #[inline] - pub fn write(name: &[u8], packet: &mut P) { - name.as_ref().write_to(packet); - } - - pub fn read(packet: &[u8], offset: &mut usize) -> tl_proto::TlResult> { - <&tl_proto::BoundedBytes<128>>::read_from(packet, offset) - .map(|bytes| Box::from(bytes.as_ref())) - } -} diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index d494ed8fe..dc1cf8d0d 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -1,11 +1,8 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use bytes::Bytes; - pub use self::address::Address; +pub use self::peer_event::{DisconnectReason, PeerEvent}; pub use self::peer_id::{Direction, PeerId}; pub use self::peer_info::{PeerAffinity, PeerInfo}; +pub use self::request::{InboundRequestMeta, Request, Response, ServiceRequest, Version}; pub use self::rpc::RpcQuery; pub use self::service::{ service_datagram_fn, service_message_fn, service_query_fn, BoxCloneService, BoxService, @@ -13,144 +10,9 @@ pub use self::service::{ }; mod address; +mod peer_event; mod peer_id; mod peer_info; +mod request; mod rpc; mod service; - -#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -#[repr(u16)] -pub enum Version { - #[default] - V1 = 1, -} - -impl TryFrom for Version { - type Error = anyhow::Error; - - fn try_from(value: u16) -> Result { - match value { - 1 => Ok(Self::V1), - _ => Err(anyhow::anyhow!("invalid version: {value}")), - } - } -} - -impl Version { - pub fn to_u16(self) -> u16 { - self as u16 - } -} - -pub struct Request { - pub version: Version, - pub body: Bytes, -} - -impl Request { - pub fn from_tl(body: T) -> Self - where - T: tl_proto::TlWrite, - { - Self { - version: Default::default(), - body: tl_proto::serialize(body).into(), - } - } -} - -impl 
AsRef<[u8]> for Request { - #[inline] - fn as_ref(&self) -> &[u8] { - self.body.as_ref() - } -} - -pub struct Response { - pub version: Version, - pub body: Bytes, -} - -impl Response { - pub fn from_tl(body: T) -> Self - where - T: tl_proto::TlWrite, - { - Self { - version: Default::default(), - body: tl_proto::serialize(body).into(), - } - } - - pub fn parse_tl(self) -> tl_proto::TlResult - where - for<'a> T: tl_proto::TlRead<'a, Repr = tl_proto::Boxed>, - { - tl_proto::deserialize(self.body.as_ref()) - } -} - -impl AsRef<[u8]> for Response { - #[inline] - fn as_ref(&self) -> &[u8] { - self.body.as_ref() - } -} - -pub struct ServiceRequest { - pub metadata: Arc, - pub body: Bytes, -} - -impl AsRef<[u8]> for ServiceRequest { - #[inline] - fn as_ref(&self) -> &[u8] { - self.body.as_ref() - } -} - -#[derive(Debug, Clone)] -pub struct InboundRequestMeta { - pub peer_id: PeerId, - pub origin: Direction, - pub remote_address: SocketAddr, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum PeerEvent { - NewPeer(PeerId), - LostPeer(PeerId, DisconnectReason), -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum DisconnectReason { - Requested, - VersionMismatch, - TransportError, - ConnectionClosed, - ApplicationClosed, - Reset, - TimedOut, - LocallyClosed, -} - -impl From for DisconnectReason { - #[inline] - fn from(value: quinn::ConnectionError) -> Self { - Self::from(&value) - } -} - -impl From<&quinn::ConnectionError> for DisconnectReason { - fn from(value: &quinn::ConnectionError) -> Self { - match value { - quinn::ConnectionError::VersionMismatch => Self::VersionMismatch, - quinn::ConnectionError::TransportError(_) => Self::TransportError, - quinn::ConnectionError::ConnectionClosed(_) => Self::ConnectionClosed, - quinn::ConnectionError::ApplicationClosed(_) => Self::ApplicationClosed, - quinn::ConnectionError::Reset => Self::Reset, - quinn::ConnectionError::TimedOut => Self::TimedOut, - quinn::ConnectionError::LocallyClosed => Self::LocallyClosed, - } - } -} diff --git a/network/src/types/peer_event.rs b/network/src/types/peer_event.rs new file mode 100644 index 000000000..e545665fd --- /dev/null +++ b/network/src/types/peer_event.rs @@ -0,0 +1,40 @@ +use crate::types::PeerId; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PeerEvent { + NewPeer(PeerId), + LostPeer(PeerId, DisconnectReason), +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum DisconnectReason { + Requested, + VersionMismatch, + TransportError, + ConnectionClosed, + ApplicationClosed, + Reset, + TimedOut, + LocallyClosed, +} + +impl From for DisconnectReason { + #[inline] + fn from(value: quinn::ConnectionError) -> Self { + Self::from(&value) + } +} + +impl From<&quinn::ConnectionError> for DisconnectReason { + fn from(value: &quinn::ConnectionError) -> Self { + match value { + quinn::ConnectionError::VersionMismatch => Self::VersionMismatch, + quinn::ConnectionError::TransportError(_) => Self::TransportError, + quinn::ConnectionError::ConnectionClosed(_) => Self::ConnectionClosed, + quinn::ConnectionError::ApplicationClosed(_) => Self::ApplicationClosed, + quinn::ConnectionError::Reset => Self::Reset, + quinn::ConnectionError::TimedOut => Self::TimedOut, + quinn::ConnectionError::LocallyClosed => Self::LocallyClosed, + } + } +} diff --git a/network/src/types/request.rs b/network/src/types/request.rs new file mode 100644 index 000000000..67bd905d2 --- /dev/null +++ b/network/src/types/request.rs @@ -0,0 +1,104 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use bytes::Bytes; + +use 
crate::types::{Direction, PeerId}; + +#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[repr(u16)] +pub enum Version { + #[default] + V1 = 1, +} + +impl TryFrom for Version { + type Error = anyhow::Error; + + fn try_from(value: u16) -> Result { + match value { + 1 => Ok(Self::V1), + _ => Err(anyhow::anyhow!("invalid version: {value}")), + } + } +} + +impl Version { + pub fn to_u16(self) -> u16 { + self as u16 + } +} + +pub struct Request { + pub version: Version, + pub body: Bytes, +} + +impl Request { + pub fn from_tl(body: T) -> Self + where + T: tl_proto::TlWrite, + { + Self { + version: Default::default(), + body: tl_proto::serialize(body).into(), + } + } +} + +impl AsRef<[u8]> for Request { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + +pub struct Response { + pub version: Version, + pub body: Bytes, +} + +impl Response { + pub fn from_tl(body: T) -> Self + where + T: tl_proto::TlWrite, + { + Self { + version: Default::default(), + body: tl_proto::serialize(body).into(), + } + } + + pub fn parse_tl(self) -> tl_proto::TlResult + where + for<'a> T: tl_proto::TlRead<'a, Repr = tl_proto::Boxed>, + { + tl_proto::deserialize(self.body.as_ref()) + } +} + +impl AsRef<[u8]> for Response { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + +pub struct ServiceRequest { + pub metadata: Arc, + pub body: Bytes, +} + +impl AsRef<[u8]> for ServiceRequest { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + +#[derive(Debug, Clone)] +pub struct InboundRequestMeta { + pub peer_id: PeerId, + pub origin: Direction, + pub remote_address: SocketAddr, +} diff --git a/network/tests/dht.rs b/network/tests/dht.rs index 04c6425ec..244893a39 100644 --- a/network/tests/dht.rs +++ b/network/tests/dht.rs @@ -10,7 +10,7 @@ use anyhow::Result; use everscale_crypto::ed25519; use tl_proto::{TlRead, TlWrite}; use tycho_network::{ - Address, DhtClient, DhtService, FindValueError, Network, PeerId, PeerInfo, Router, + proto, Address, DhtClient, DhtService, FindValueError, Network, PeerId, PeerInfo, Router, }; use tycho_util::time::now_sec; @@ -114,23 +114,23 @@ async fn bootstrap_nodes_store_value() -> Result<()> { let first = &nodes[0].dht; first - .entry(b"test") + .entry(proto::dht::PeerValueKeyName::NodeInfo) .with_data(VALUE) .with_time(now_sec()) - .store_as_peer() + .store() .await?; // Retrieve an existing value let value = first - .entry(b"test") - .find_peer_value::(&first.network().peer_id()) + .entry(proto::dht::PeerValueKeyName::NodeInfo) + .find_value::(&first.network().peer_id()) .await?; assert_eq!(value, VALUE); // Retrieve a non-existing value let res = first - .entry(b"non-existing") - .find_peer_value_raw(&first.network().peer_id()) + .entry(proto::dht::PeerValueKeyName::NodeInfo) + .find_peer_value_raw(nodes[1].network.peer_id()) .await; assert!(matches!(res, Err(FindValueError::NotFound))); From b7bbc305e63b9de1417d67fb4f413a793c095357 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Thu, 15 Feb 2024 20:12:15 +0100 Subject: [PATCH 27/35] feat(network): Improve dht populate background task --- network/src/dht/mod.rs | 116 ++++++++++++++++++++++++++++++++----- network/src/dht/query.rs | 14 ++++- network/src/dht/routing.rs | 30 +++++----- 3 files changed, 125 insertions(+), 35 deletions(-) diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 0644698ea..691b44bc3 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -1,8 +1,14 @@ +use std::collections::hash_map; use std::sync::{Arc, 
Mutex}; use std::time::Duration; use anyhow::Result; use bytes::{Buf, Bytes}; +use futures_util::stream::FuturesUnordered; +use futures_util::StreamExt; +use rand::RngCore; +use tokio::sync::Semaphore; +use tokio::task::JoinHandle; use tycho_util::realloc_box_enum; use tycho_util::time::{now_sec, shifted_interval}; @@ -394,7 +400,7 @@ impl DhtInner { const ANNOUNCE_PERIOD: Duration = Duration::from_secs(600); const ANNOUNCE_SHIFT: Duration = Duration::from_secs(60); - const POPULATE_PERIOD: Duration = Duration::from_secs(60); + const POPULATE_PERIOD: Duration = Duration::from_secs(600); const POPULATE_SHIFT: Duration = Duration::from_secs(10); enum Action { @@ -410,6 +416,7 @@ impl DhtInner { let mut announce_interval = shifted_interval(ANNOUNCE_PERIOD, ANNOUNCE_SHIFT); let mut populate_interval = shifted_interval(POPULATE_PERIOD, POPULATE_SHIFT); + let mut prev_populate_fut = None::>; loop { let action = tokio::select! { _ = refresh_interval.tick() => Action::Refresh, @@ -435,8 +442,17 @@ impl DhtInner { } } Action::Populate => { - // TODO: spawn and await in the background? - this.find_more_dht_nodes(&network).await; + if let Some(fut) = prev_populate_fut.take() { + if let Err(e) = fut.await { + if e.is_panic() { + std::panic::resume_unwind(e.into_panic()); + } + } + } + + prev_populate_fut = Some(tokio::spawn(async move { + this.populate(&network).await; + })); } } } @@ -458,6 +474,7 @@ impl DhtInner { *self.node_info.lock().unwrap() = Some(node_info); } + #[tracing::instrument(level = "debug", skip_all, fields(local_id = %self.local_id))] async fn announce_local_node_info(&self, network: &Network, ttl: u32) -> Result<()> { let data = tl_proto::serialize(&[network.local_addr().into()] as &[Address]); @@ -469,22 +486,85 @@ impl DhtInner { self.store_value(network, ValueRef::Peer(value)).await } - async fn find_more_dht_nodes(&self, network: &Network) { - // TODO: deduplicate shared futures - let query = Query::new( - network.clone(), - &self.routing_table.lock().unwrap(), - self.local_id.as_bytes(), - self.max_k, - ); + #[tracing::instrument(level = "debug", skip_all, fields(local_id = %self.local_id))] + async fn populate(&self, network: &Network) { + const PARALLEL_QUERIES: usize = 3; + const MAX_DISTANCE: usize = 15; + const QUERY_DEPTH: usize = 3; + + // Prepare futures for each bucket + let semaphore = Semaphore::new(PARALLEL_QUERIES); + let mut futures = FuturesUnordered::new(); + { + // NOTE: rng is intentionally dropped after this block to make this future `Send`. + let rng = &mut rand::thread_rng(); + + let routing_table = self.routing_table.lock().unwrap(); + + // Iterate over the first buckets up until some distance (`MAX_DISTANCE`) + // or up to the last non-empty bucket. + let first_n_non_empty_buckets = routing_table + .buckets + .range(..=MAX_DISTANCE) + .rev() + .skip_while(|(_, bucket)| bucket.is_empty()); + + for (&distance, _) in first_n_non_empty_buckets { + // Query K closest nodes for a random ID at the specified distance from the local ID. 
+ let random_id = random_key_at_distance(&routing_table.local_id, distance, rng); + let query = Query::new( + network.clone(), + &routing_table, + random_id.as_bytes(), + self.max_k, + ); + + futures.push(async { + let _permit = semaphore.acquire().await.unwrap(); + query.find_peers(Some(QUERY_DEPTH)).await + }); + } + } - // NOTE: expression is intentionally split to drop the routing table guard - let peers = query.find_peers().await; + // Receive initial set of peers + let Some(mut peers) = futures.next().await else { + tracing::debug!("no new peers found"); + return; + }; + + // Merge new peers into the result set + while let Some(new_peers) = futures.next().await { + for (peer_id, peer) in new_peers { + match peers.entry(peer_id) { + // Just insert the peer if it's new + hash_map::Entry::Vacant(entry) => { + entry.insert(peer); + } + // Replace the peer if it's newer (by creation time) + hash_map::Entry::Occupied(mut entry) => { + if entry.get().created_at < peer.created_at { + entry.insert(peer); + } + } + } + } + } let mut routing_table = self.routing_table.lock().unwrap(); - for peer in peers { - routing_table.add(peer, self.max_k, &self.node_ttl); + let mut count = 0usize; + for peer in peers.into_values() { + if peer.id == self.local_id { + continue; + } + + let is_new = routing_table.add(peer.clone(), self.max_k, &self.node_ttl); + if is_new { + network.known_peers().insert(peer, PeerAffinity::Allowed); + count += 1; + } } + + tracing::debug!(count, "found new peers"); } async fn find_value(&self, network: &Network, key_hash: &[u8; 32]) -> Option> { @@ -584,6 +664,12 @@ impl DhtInner { } } +fn random_key_at_distance(from: &PeerId, distance: usize, rng: &mut impl RngCore) -> PeerId { + let mut result = *from; + rng.fill_bytes(&mut result.0[distance..]); + result +} + pub fn xor_distance(left: &PeerId, right: &PeerId) -> usize { for (i, (left, right)) in std::iter::zip(left.0.chunks(8), right.0.chunks(8)).enumerate() { let left = u64::from_be_bytes(left.try_into().unwrap()); diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index 862f92f49..216aae009 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -43,7 +43,7 @@ impl Query { } fn local_id(&self) -> &[u8; 32] { - self.candidates.local_id().as_bytes() + self.candidates.local_id.as_bytes() } #[tracing::instrument(level = "debug", skip_all)] @@ -126,7 +126,7 @@ impl Query { } #[tracing::instrument(level = "debug", skip_all)] - pub async fn find_peers(mut self) -> impl Iterator> { + pub async fn find_peers(mut self, depth: Option) -> FastHashMap> { // Prepare shared request let request_body = Bytes::from(tl_proto::serialize(rpc::FindNode { key: *self.local_id(), @@ -147,6 +147,8 @@ impl Query { }); // Process responses and refill futures until all peers are traversed + let mut current_depth = 0; + let max_depth = depth.unwrap_or(usize::MAX); let mut result = FastHashMap::>::new(); while let Some((node, res)) = futures.next().await { match res { @@ -158,6 +160,12 @@ impl Query { continue; } + current_depth += 1; + if current_depth >= max_depth { + // Stop on max depth + break; + } + // Add new nodes from the closest range self.candidates .visit_closest(self.local_id(), self.max_k, |node| { @@ -185,7 +193,7 @@ impl Query { } // Done - result.into_values() + result } fn update_candidates( diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index 1f584bc68..cb81f11ca 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -6,8 +6,8 @@ use crate::dht::{xor_distance, 
MAX_XOR_DISTANCE}; use crate::types::{PeerId, PeerInfo}; pub(crate) struct RoutingTable { - local_id: PeerId, - buckets: BTreeMap, + pub local_id: PeerId, + pub buckets: BTreeMap, } impl RoutingTable { @@ -18,10 +18,6 @@ impl RoutingTable { } } - pub fn local_id(&self) -> &PeerId { - &self.local_id - } - #[allow(unused)] pub fn is_empty(&self) -> bool { self.buckets.values().all(Bucket::is_empty) @@ -32,8 +28,8 @@ impl RoutingTable { self.buckets.values().map(|bucket| bucket.nodes.len()).sum() } - pub fn add(&mut self, node: Arc, max_k: usize, node_ttl: &Duration) -> bool { - let distance = xor_distance(&self.local_id, &node.id); + pub fn add(&mut self, peer: Arc, max_k: usize, node_ttl: &Duration) -> bool { + let distance = xor_distance(&self.local_id, &peer.id); if distance == 0 { return false; } @@ -41,7 +37,7 @@ impl RoutingTable { self.buckets .entry(distance) .or_insert_with(|| Bucket::with_capacity(max_k)) - .insert(node, max_k, node_ttl) + .insert(peer, max_k, node_ttl) } #[allow(unused)] @@ -118,7 +114,7 @@ impl RoutingTable { } } -struct Bucket { +pub(crate) struct Bucket { nodes: VecDeque, } @@ -148,7 +144,7 @@ impl Bucket { true } - fn remove(&mut self, key: &PeerId) -> bool { + pub fn remove(&mut self, key: &PeerId) -> bool { if let Some(index) = self.nodes.iter().position(|node| &node.data.id == key) { self.nodes.remove(index); true @@ -157,18 +153,18 @@ impl Bucket { } } - fn contains(&self, key: &PeerId) -> bool { + pub fn contains(&self, key: &PeerId) -> bool { self.nodes.iter().any(|node| &node.data.id == key) } - fn is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool { self.nodes.is_empty() } } -struct Node { - data: Arc, - last_updated_at: Instant, +pub(crate) struct Node { + pub data: Arc, + pub last_updated_at: Instant, } impl Node { @@ -179,7 +175,7 @@ impl Node { } } - fn is_expired(&self, timeout: &Duration) -> bool { + pub fn is_expired(&self, timeout: &Duration) -> bool { &self.last_updated_at.elapsed() >= timeout } } From 9853f0c4988c1f314048474b709f521a7994a71a Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Fri, 16 Feb 2024 15:37:53 +0100 Subject: [PATCH 28/35] feat(network): Impl serde for basic types and configs --- Cargo.lock | 43 +++++ network/Cargo.toml | 9 +- network/examples/simple.rs | 247 +++++++++++++++++++++++++ network/src/dht/config.rs | 19 +- network/src/network/config.rs | 41 ++++- network/src/types/address.rs | 27 ++- network/src/types/mod.rs | 6 +- network/src/types/peer_event.rs | 6 +- network/src/types/peer_id.rs | 54 ++++-- network/src/types/peer_info.rs | 94 +++++++++- network/src/types/request.rs | 91 +++++++++- network/tests/dht.rs | 2 + util/Cargo.toml | 3 + util/src/lib.rs | 1 + util/src/serde_helpers.rs | 312 ++++++++++++++++++++++++++++++++ 15 files changed, 925 insertions(+), 30 deletions(-) create mode 100644 network/examples/simple.rs create mode 100644 util/src/serde_helpers.rs diff --git a/Cargo.lock b/Cargo.lock index 98ce12e3c..4e3be6096 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -45,6 +45,37 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +[[package]] +name = "argh" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7af5ba06967ff7214ce4c7419c7d185be7ecd6cc4965a8f6e1d8ce0398aad219" +dependencies = [ + "argh_derive", + "argh_shared", +] + +[[package]] +name = "argh_derive" +version = "0.1.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "56df0aeedf6b7a2fc67d06db35b09684c3e8da0c95f8f27685cb17e08413d87a" +dependencies = [ + "argh_shared", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "argh_shared" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5693f39141bda5760ecc4111ab08da40565d1771038c4a0250f03457ec707531" +dependencies = [ + "serde", +] + [[package]] name = "asn1-rs" version = "0.5.2" @@ -589,6 +620,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "itoa" version = "1.0.10" @@ -1820,6 +1857,8 @@ version = "0.0.1" dependencies = [ "ahash", "anyhow", + "argh", + "base64", "bytes", "castaway", "dashmap", @@ -1837,6 +1876,7 @@ dependencies = [ "rustls", "rustls-webpki", "serde", + "serde_json", "socket2", "thiserror", "tl-proto", @@ -1864,7 +1904,10 @@ dependencies = [ "castaway", "dashmap", "futures-util", + "hex", + "humantime", "rand", + "serde", "tokio", ] diff --git a/network/Cargo.toml b/network/Cargo.toml index a2f6566c1..e1ab4b61c 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -5,11 +5,16 @@ edition = "2021" description = "A peer-to-peer networking library." include = ["src/**/*.rs", "src/**/*.tl"] +[[example]] +name = "simple" +path = "examples/simple.rs" + [dependencies] # crates.io deps ahash = "0.8" anyhow = "1.0" -bytes = "1.0" +base64 = "0.21" +bytes = { version = "1.0", features = ["serde"] } castaway = "0.2" dashmap = "5.4" ed25519 = { version = "2.0", features = ["alloc", "pkcs8"] } @@ -38,6 +43,8 @@ x509-parser = "0.15" tycho-util = { path = "../util", version = "=0.0.1" } [dev-dependencies] +argh = "0.1" +serde_json = "1.0" tokio = { version = "1", features = ["rt-multi-thread"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-test = "0.2" diff --git a/network/examples/simple.rs b/network/examples/simple.rs new file mode 100644 index 000000000..683aa7df1 --- /dev/null +++ b/network/examples/simple.rs @@ -0,0 +1,247 @@ +//! Run tests with this env: +//! ```text +//! RUST_LOG=info,tycho_network=trace +//! ``` + +use std::io::IsTerminal; +use std::net::SocketAddr; +use std::sync::Arc; + +use anyhow::Result; +use argh::FromArgs; +use everscale_crypto::ed25519; +use serde::{Deserialize, Serialize}; +use tycho_network::{ + Address, DhtClient, DhtConfig, DhtService, Network, NetworkConfig, PeerId, PeerInfo, Router, +}; +use tycho_util::time::now_sec; + +#[tokio::main] +async fn main() -> Result<()> { + let app: App = argh::from_env(); + app.run().await +} + +/// Tycho network node. 
+#[derive(FromArgs)] +struct App { + #[argh(subcommand)] + cmd: Cmd, +} + +impl App { + async fn run(self) -> Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::builder() + .with_default_directive(tracing::Level::INFO.into()) + .from_env_lossy(), + ) + .with_ansi(std::io::stdout().is_terminal()) + .init(); + + match self.cmd { + Cmd::Run(cmd) => cmd.run().await, + Cmd::GenKey(cmd) => cmd.run(), + Cmd::GenDht(cmd) => cmd.run(), + } + } +} + +#[derive(FromArgs)] +#[argh(subcommand)] +enum Cmd { + Run(CmdRun), + GenKey(CmdGenKey), + GenDht(CmdGenDht), +} + +/// run a node +#[derive(FromArgs)] +#[argh(subcommand, name = "run")] +struct CmdRun { + /// local node address + #[argh(positional)] + addr: SocketAddr, + + /// node secret key + #[argh(option)] + key: String, + + /// path to the node config + #[argh(option)] + config: Option, + + /// path to the global config + #[argh(option)] + global_config: String, +} + +impl CmdRun { + async fn run(self) -> Result<()> { + let node_config = self + .config + .map(NodeConfig::from_file) + .transpose()? + .unwrap_or_default(); + let global_config = GlobalConfig::from_file(self.global_config)?; + + let node = Node::new(parse_key(&self.key)?, self.addr.into(), node_config)?; + + let mut initial_peer_count = 0usize; + for peer in global_config.bootstrap_peers { + let is_new = node.dht.add_peer(Arc::new(peer))?; + initial_peer_count += is_new as usize; + } + + tracing::info!( + local_id = %node.network.peer_id(), + addr = %self.addr, + initial_peer_count, + "node started" + ); + + futures_util::future::pending().await + } +} + +/// generate a key +#[derive(FromArgs)] +#[argh(subcommand, name = "genkey")] +struct CmdGenKey {} + +impl CmdGenKey { + fn run(self) -> Result<()> { + let secret_key = ed25519::SecretKey::generate(&mut rand::thread_rng()); + let public_key = ed25519::PublicKey::from(&secret_key); + let peer_id = PeerId::from(public_key); + + let data = serde_json::json!({ + "key": hex::encode(secret_key.as_bytes()), + "peer_id": peer_id.to_string(), + }); + let output = if std::io::stdin().is_terminal() { + serde_json::to_string_pretty(&data) + } else { + serde_json::to_string(&data) + }?; + println!("{output}"); + Ok(()) + } +} + +/// generate a dht node info +#[derive(FromArgs)] +#[argh(subcommand, name = "gendht")] +struct CmdGenDht { + /// local node address + #[argh(positional)] + addr: SocketAddr, + + /// node secret key + #[argh(option)] + key: String, + + /// time to live in seconds (default: unlimited) + #[argh(option)] + ttl: Option, +} + +impl CmdGenDht { + fn run(self) -> Result<()> { + let entry = Node::make_peer_info(parse_key(&self.key)?, self.addr.into(), self.ttl); + let output = if std::io::stdin().is_terminal() { + serde_json::to_string_pretty(&entry) + } else { + serde_json::to_string(&entry) + }?; + println!("{output}"); + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +struct GlobalConfig { + bootstrap_peers: Vec, +} + +impl GlobalConfig { + fn from_file(path: impl AsRef) -> Result { + let config: Self = { + let data = std::fs::read_to_string(path.as_ref())?; + serde_json::from_str(&data)? 
+ }; + + let now = now_sec(); + for peer in &config.bootstrap_peers { + anyhow::ensure!(peer.is_valid(now), "invalid peer info for {}", peer.id); + } + + Ok(config) + } +} + +#[derive(Default, Serialize, Deserialize)] +#[serde(default)] +struct NodeConfig { + network: NetworkConfig, + dht: DhtConfig, +} + +impl NodeConfig { + fn from_file(path: impl AsRef) -> Result { + let data = std::fs::read_to_string(path.as_ref())?; + let config = serde_json::from_str(&data)?; + Ok(config) + } +} + +struct Node { + network: Network, + dht: DhtClient, +} + +impl Node { + fn new(key: ed25519::SecretKey, address: Address, config: NodeConfig) -> Result { + let keypair = everscale_crypto::ed25519::KeyPair::from(&key); + + let (dht_client, dht) = DhtService::builder(keypair.public_key.into()) + .with_config(config.dht) + .build(); + + let router = Router::builder().route(dht).build(); + + let network = Network::builder() + .with_config(config.network) + .with_private_key(key.to_bytes()) + .with_service_name("test-service") + .build(address, router)?; + + let dht = dht_client.build(network.clone()); + + Ok(Self { network, dht }) + } + + fn make_peer_info(key: ed25519::SecretKey, address: Address, ttl: Option) -> PeerInfo { + let keypair = ed25519::KeyPair::from(&key); + let peer_id = PeerId::from(keypair.public_key); + + let now = now_sec(); + let mut node_info = PeerInfo { + id: peer_id, + address_list: vec![address].into_boxed_slice(), + created_at: now, + expires_at: ttl.unwrap_or(u32::MAX), + signature: Box::new([0; 64]), + }; + *node_info.signature = keypair.sign(&node_info); + node_info + } +} + +fn parse_key(key: &str) -> Result { + match hex::decode(key)?.try_into() { + Ok(bytes) => Ok(ed25519::SecretKey::from_bytes(bytes)), + Err(_) => anyhow::bail!("invalid secret key"), + } +} diff --git a/network/src/dht/config.rs b/network/src/dht/config.rs index a1bd60bcf..ee81eb59e 100644 --- a/network/src/dht/config.rs +++ b/network/src/dht/config.rs @@ -1,17 +1,34 @@ use std::time::Duration; +use serde::{Deserialize, Serialize}; +use tycho_util::serde_helpers; + // TODO: add max storage item size -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] pub struct DhtConfig { /// DHT K parameter. + /// + /// Default: 6. pub max_k: usize, /// Maximum time to live for node info. + /// + /// Default: 1 hour. + #[serde(with = "serde_helpers::humantime")] pub max_node_info_ttl: Duration, /// Maximum time to live for stored values. + /// + /// Default: 1 hour. + #[serde(with = "serde_helpers::humantime")] pub max_stored_value_ttl: Duration, /// Maximum storage capacity (number of entries). + /// + /// Default: 10000. pub max_storage_capacity: u64, /// Time until a stored item is considered idle and can be removed. + /// + /// Default: unlimited. + #[serde(with = "serde_helpers::humantime")] pub storage_item_time_to_idle: Option, } diff --git a/network/src/network/config.rs b/network/src/network/config.rs index 5c4da552a..1ef9dbdc4 100644 --- a/network/src/network/config.rs +++ b/network/src/network/config.rs @@ -2,25 +2,53 @@ use std::sync::Arc; use std::time::Duration; use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use tycho_util::serde_helpers; use crate::network::crypto::{ generate_cert, peer_id_from_certificate, CertVerifier, CertVerifierWithPeerId, }; use crate::types::PeerId; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] #[non_exhaustive] pub struct NetworkConfig { pub quic: Option, + + /// Default: 128. 
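+    /// Capacity of the channel used to pass commands to the connection manager.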
pub connection_manager_channel_capacity: usize, + + /// Default: 5 seconds. + #[serde(with = "serde_helpers::humantime")] pub connectivity_check_interval: Duration, + + /// Default: yes. pub max_frame_size: Option, + + /// Default: 10 seconds. + #[serde(with = "serde_helpers::humantime")] pub connect_timeout: Duration, + + /// Default: 10 seconds. + #[serde(with = "serde_helpers::humantime")] pub connection_backoff: Duration, + + /// Default: 1 minute. + #[serde(with = "serde_helpers::humantime")] pub max_connection_backoff: Duration, + + /// Default: 100. pub max_concurrent_outstanding_connections: usize, + + /// Default: unlimited. pub max_concurrent_connections: Option, + + /// Default: 128. pub active_peers_event_channel_capacity: usize, + + /// Default: 1 minute. + #[serde(with = "serde_helpers::humantime")] pub shutdown_idle_timeout: Duration, } @@ -42,15 +70,24 @@ impl Default for NetworkConfig { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] pub struct QuicConfig { + /// Default: 100. pub max_concurrent_bidi_streams: u64, + /// Default: 100. pub max_concurrent_uni_streams: u64, + /// Default: auto. pub stream_receive_window: Option, + /// Default: auto. pub receive_window: Option, + /// Default: auto. pub send_window: Option, + // TODO: add all other fields from quin::TransportConfig + /// Default: auto. pub socket_send_buffer_size: Option, + /// Default: auto. pub socket_recv_buffer_size: Option, } diff --git a/network/src/types/address.rs b/network/src/types/address.rs index b954652a3..f772054c0 100644 --- a/network/src/types/address.rs +++ b/network/src/types/address.rs @@ -1,10 +1,12 @@ use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; use std::str::FromStr; +use serde::{Deserialize, Serialize}; use tl_proto::{TlRead, TlWrite}; -#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct Address(SocketAddr); +#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +#[serde(transparent)] +pub struct Address(#[serde(with = "tycho_util::serde_helpers::socket_addr")] SocketAddr); impl Address { pub fn resolve(&self) -> std::io::Result { @@ -131,3 +133,24 @@ impl FromStr for Address { const ADDRESS_V4_TL_ID: u32 = tl_proto::id!("transport.address.ipv4", scheme = "proto.tl"); const ADDRESS_V6_TL_ID: u32 = tl_proto::id!("transport.address.ipv6", scheme = "proto.tl"); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn serde() { + const SOME_ADDR_V4: &str = "101.102.103.104:12345"; + const SOME_ADDR_V6: &str = "[2345:0425:2CA1:0:0:0567:5673:23b5]:12345"; + + for addr in [SOME_ADDR_V4, SOME_ADDR_V6] { + let from_json: Address = serde_json::from_str(&format!("\"{addr}\"")).unwrap(); + let from_str = Address::from_str(addr).unwrap(); + assert_eq!(from_json, from_str); + + let to_json = serde_json::to_string(&from_json).unwrap(); + let from_json: Address = serde_json::from_str(&to_json).unwrap(); + assert_eq!(from_json, from_str); + } + } +} diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index dc1cf8d0d..f40e152fe 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -1,8 +1,10 @@ pub use self::address::Address; pub use self::peer_event::{DisconnectReason, PeerEvent}; -pub use self::peer_id::{Direction, PeerId}; +pub use self::peer_id::PeerId; pub use self::peer_info::{PeerAffinity, PeerInfo}; -pub use self::request::{InboundRequestMeta, Request, Response, ServiceRequest, Version}; +pub use self::request::{ + Direction, InboundRequestMeta, 
Request, Response, ServiceRequest, Version, +}; pub use self::rpc::RpcQuery; pub use self::service::{ service_datagram_fn, service_message_fn, service_query_fn, BoxCloneService, BoxService, diff --git a/network/src/types/peer_event.rs b/network/src/types/peer_event.rs index e545665fd..33834af2f 100644 --- a/network/src/types/peer_event.rs +++ b/network/src/types/peer_event.rs @@ -1,12 +1,14 @@ +use serde::{Deserialize, Serialize}; + use crate::types::PeerId; -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum PeerEvent { NewPeer(PeerId), LostPeer(PeerId, DisconnectReason), } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub enum DisconnectReason { Requested, VersionMismatch, diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index b429122e6..031e45289 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -1,7 +1,6 @@ use std::str::FromStr; use everscale_crypto::ed25519; -use rand::Rng; use tl_proto::{TlRead, TlWrite}; #[derive(Clone, Copy, TlRead, TlWrite, Hash, PartialEq, Eq, PartialOrd, Ord)] @@ -30,7 +29,7 @@ impl PeerId { } pub fn random() -> Self { - Self(rand::thread_rng().gen()) + Self(rand::random()) } } @@ -71,6 +70,32 @@ impl FromStr for PeerId { } } +impl serde::Serialize for PeerId { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + if serializer.is_human_readable() { + serializer.collect_str(self) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> serde::Deserialize<'de> for PeerId { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + if deserializer.is_human_readable() { + deserializer.deserialize_str(tycho_util::serde_helpers::StrVisitor::new()) + } else { + <[u8; 32]>::deserialize(deserializer).map(Self) + } + } +} + impl From for PeerId { #[inline] fn from(public_key: ed25519::PublicKey) -> Self { @@ -123,17 +148,20 @@ impl std::ops::BitXorAssign<&PeerId> for PeerId { } } -#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] -pub enum Direction { - Inbound, - Outbound, -} +#[cfg(test)] +mod tests { + use super::*; -impl std::fmt::Display for Direction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(match self { - Self::Inbound => "inbound", - Self::Outbound => "outbound", - }) + #[test] + fn serde() { + const SOME_ID: &str = "5d09fe251943525a30f471791d5b4fea1298613f52ad2ad6d985fed05eb00533"; + + let from_json: PeerId = serde_json::from_str(&format!("\"{SOME_ID}\"")).unwrap(); + let from_str = PeerId::from_str(SOME_ID).unwrap(); + assert_eq!(from_json, from_str); + + let to_json = serde_json::to_string(&from_json).unwrap(); + let from_json: PeerId = serde_json::from_str(&to_json).unwrap(); + assert_eq!(from_json, from_str); } } diff --git a/network/src/types/peer_info.rs b/network/src/types/peer_info.rs index 5190c1644..08cbbcd64 100644 --- a/network/src/types/peer_info.rs +++ b/network/src/types/peer_info.rs @@ -1,9 +1,10 @@ +use serde::{Deserialize, Serialize}; use tl_proto::{TlRead, TlWrite}; use crate::types::{Address, PeerId}; use crate::util::{check_peer_signature, tl}; -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] pub enum PeerAffinity { High, Allowed, @@ -11,7 +12,7 @@ pub enum PeerAffinity { } /// A signed node info. 
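+/// In human-readable serde formats the `id` is serialized as a hex string, each
+/// address as an `ip:port` string and the `signature` as base64; the `serde`
+/// test at the bottom of this file shows the resulting JSON.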
-#[derive(Debug, Clone, TlRead, TlWrite)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TlRead, TlWrite)] pub struct PeerInfo { /// Node public key. pub id: PeerId, @@ -23,6 +24,7 @@ pub struct PeerInfo { /// Unix timestamp up to which the info is valid. pub expires_at: u32, /// A `ed25519` signature of the info. + #[serde(with = "serde_signature")] #[tl(signature, with = "tl::signature_owned")] pub signature: Box<[u8; 64]>, } @@ -78,3 +80,91 @@ mod tl_address_list { Ok(items.into_boxed_slice()) } } + +mod serde_signature { + use base64::engine::Engine as _; + use base64::prelude::BASE64_STANDARD; + use tycho_util::serde_helpers::{BorrowedStr, BytesVisitor}; + + use super::*; + + pub fn serialize(data: &[u8; 64], serializer: S) -> Result + where + S: serde::Serializer, + { + if serializer.is_human_readable() { + serializer.serialize_str(&BASE64_STANDARD.encode(data)) + } else { + data.serialize(serializer) + } + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + if deserializer.is_human_readable() { + as Deserialize>::deserialize(deserializer).and_then( + |BorrowedStr(s)| { + let mut buffer = [0u8; 66]; + match BASE64_STANDARD.decode_slice(s.as_ref(), &mut buffer) { + Ok(64) => { + let [data @ .., _, _] = buffer; + Ok(Box::new(data)) + } + _ => Err(Error::custom("Invalid signature")), + } + }, + ) + } else { + deserializer + .deserialize_bytes(BytesVisitor::<64>) + .map(Box::new) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + #[test] + fn serde() { + let target_peer_info = PeerInfo { + id: PeerId::from_str( + "40ed1f0e3730d9086156e706b0706b21805db8a30a2b7c73a837403e553124ee", + ) + .unwrap(), + address_list: Box::new([Address::from_str("101.102.103.104:12345").unwrap()]), + created_at: 1700000000, + expires_at: 1710000000, + signature: Box::new([ + 0xe4, 0x3b, 0xc4, 0x50, 0x73, 0xe6, 0xe2, 0x5e, 0xfa, 0xb0, 0x74, 0xc8, 0xef, 0x33, + 0xdb, 0x61, 0xf3, 0x4c, 0x68, 0xec, 0x56, 0xae, 0x38, 0x88, 0xfb, 0xc0, 0x2b, 0x1b, + 0x44, 0x6b, 0xe1, 0xc3, 0xb1, 0xdb, 0x4d, 0x34, 0xeb, 0x37, 0x03, 0x96, 0xc2, 0x9d, + 0xb2, 0xd8, 0xc0, 0x41, 0x2b, 0x9f, 0x70, 0x9a, 0x8f, 0x3c, 0x1d, 0xe6, 0x8e, 0x28, + 0x44, 0x1d, 0x7a, 0x4f, 0x39, 0xc5, 0xe1, 0x3d, + ]), + }; + + let target_peer_info_str = r#"{ + "id": "40ed1f0e3730d9086156e706b0706b21805db8a30a2b7c73a837403e553124ee", + "address_list": [ + "101.102.103.104:12345" + ], + "created_at": 1700000000, + "expires_at": 1710000000, + "signature": "5DvEUHPm4l76sHTI7zPbYfNMaOxWrjiI+8ArG0Rr4cOx20006zcDlsKdstjAQSufcJqPPB3mjihEHXpPOcXhPQ==" +}"#; + assert_eq!( + serde_json::to_string_pretty(&target_peer_info).unwrap(), + target_peer_info_str + ); + + let from_json: PeerInfo = serde_json::from_str(target_peer_info_str).unwrap(); + assert_eq!(from_json, target_peer_info); + } +} diff --git a/network/src/types/request.rs b/network/src/types/request.rs index 67bd905d2..a641e5d7e 100644 --- a/network/src/types/request.rs +++ b/network/src/types/request.rs @@ -2,8 +2,9 @@ use std::net::SocketAddr; use std::sync::Arc; use bytes::Bytes; +use serde::{Deserialize, Serialize}; -use crate::types::{Direction, PeerId}; +use crate::types::PeerId; #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] #[repr(u16)] @@ -12,6 +13,12 @@ pub enum Version { V1 = 1, } +impl Version { + pub fn to_u16(self) -> u16 { + self as u16 + } +} + impl TryFrom for Version { type Error = anyhow::Error; @@ -23,14 +30,31 @@ impl TryFrom for 
Version { } } -impl Version { - pub fn to_u16(self) -> u16 { - self as u16 +impl Serialize for Version { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_u16(self.to_u16()) + } +} + +impl<'de> Deserialize<'de> for Version { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + u16::deserialize(deserializer).and_then(|v| Self::try_from(v).map_err(Error::custom)) } } +#[derive(Serialize, Deserialize)] pub struct Request { pub version: Version, + #[serde(with = "serde_body")] pub body: Bytes, } @@ -53,8 +77,10 @@ impl AsRef<[u8]> for Request { } } +#[derive(Serialize, Deserialize)] pub struct Response { pub version: Version, + #[serde(with = "serde_body")] pub body: Bytes, } @@ -96,9 +122,64 @@ impl AsRef<[u8]> for ServiceRequest { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct InboundRequestMeta { pub peer_id: PeerId, pub origin: Direction, + #[serde(with = "tycho_util::serde_helpers::socket_addr")] pub remote_address: SocketAddr, } + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)] +pub enum Direction { + Inbound, + Outbound, +} + +impl std::fmt::Display for Direction { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Self::Inbound => "inbound", + Self::Outbound => "outbound", + }) + } +} + +mod serde_body { + use base64::engine::Engine as _; + use base64::prelude::BASE64_STANDARD; + use tycho_util::serde_helpers::BorrowedStr; + + use super::*; + + pub fn serialize(data: &[u8], serializer: S) -> Result + where + S: serde::Serializer, + { + if serializer.is_human_readable() { + serializer.serialize_str(&BASE64_STANDARD.encode(data)) + } else { + data.serialize(serializer) + } + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + if deserializer.is_human_readable() { + as Deserialize>::deserialize(deserializer).and_then( + |BorrowedStr(s)| { + BASE64_STANDARD + .decode(s.as_ref()) + .map(Bytes::from) + .map_err(Error::custom) + }, + ) + } else { + Bytes::deserialize(deserializer) + } + } +} diff --git a/network/tests/dht.rs b/network/tests/dht.rs index 244893a39..5c583656b 100644 --- a/network/tests/dht.rs +++ b/network/tests/dht.rs @@ -81,6 +81,7 @@ fn make_network(node_count: usize) -> (Vec, Vec>) { #[tokio::test] async fn bootstrap_nodes_accessible() -> Result<()> { tracing_subscriber::fmt::try_init().ok(); + tracing::info!("bootstrap_nodes_accessible"); let (nodes, _) = make_network(5); @@ -102,6 +103,7 @@ async fn bootstrap_nodes_accessible() -> Result<()> { #[tokio::test] async fn bootstrap_nodes_store_value() -> Result<()> { tracing_subscriber::fmt::try_init().ok(); + tracing::info!("bootstrap_nodes_store_value"); #[derive(Debug, Clone, PartialEq, Eq, TlWrite, TlRead)] struct SomeValue(u32); diff --git a/util/Cargo.toml b/util/Cargo.toml index 59830c5ec..3e24d5ec7 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -10,7 +10,10 @@ ahash = "0.8" castaway = "0.2" dashmap = "5.4" futures-util = "0.3" +hex = "0.4" +humantime = "2" rand = "0.8" +serde = { version = "1.0", features = ["derive"] } tokio = { version = "1", default-features = false, features = ["time"] } # local deps diff --git a/util/src/lib.rs b/util/src/lib.rs index 1405a6117..7106dbf0d 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use 
std::collections::HashSet; pub mod futures; +pub mod serde_helpers; pub mod time; pub type FastDashMap = dashmap::DashMap; diff --git a/util/src/serde_helpers.rs b/util/src/serde_helpers.rs new file mode 100644 index 000000000..bf38f98d5 --- /dev/null +++ b/util/src/serde_helpers.rs @@ -0,0 +1,312 @@ +use std::borrow::Cow; +use std::marker::PhantomData; +use std::str::FromStr; + +use serde::de::{Error, Expected, Visitor}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +pub mod socket_addr { + use std::net::SocketAddr; + + use super::*; + + pub fn serialize(value: &SocketAddr, serializer: S) -> Result { + if serializer.is_human_readable() { + serializer.collect_str(value) + } else { + value.serialize(serializer) + } + } + + pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + deserializer.deserialize_str(StrVisitor::new()) + } else { + SocketAddr::deserialize(deserializer) + } + } +} + +pub mod humantime { + use std::time::{Duration, SystemTime}; + + use super::*; + + pub fn serialize(value: &T, serializer: S) -> Result + where + for<'a> Serde<&'a T>: Serialize, + { + Serde::from(value).serialize(serializer) + } + + pub fn deserialize<'a, T, D: Deserializer<'a>>(deserializer: D) -> Result + where + Serde: Deserialize<'a>, + { + Serde::deserialize(deserializer).map(Serde::into_inner) + } + + pub struct Serde(T); + + impl Serde { + #[inline] + pub fn into_inner(self) -> T { + self.0 + } + } + + impl From for Serde { + fn from(value: T) -> Serde { + Serde(value) + } + } + + impl<'de> Deserialize<'de> for Serde { + fn deserialize>(d: D) -> Result, D::Error> { + struct V; + + impl<'de2> Visitor<'de2> for V { + type Value = Duration; + + fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("a duration") + } + + fn visit_str(self, v: &str) -> Result { + ::humantime::parse_duration(v) + .map_err(|_e| E::invalid_value(serde::de::Unexpected::Str(v), &self)) + } + } + + d.deserialize_str(V).map(Serde) + } + } + + impl<'de> Deserialize<'de> for Serde { + fn deserialize>(d: D) -> Result, D::Error> { + struct V; + + impl<'de2> Visitor<'de2> for V { + type Value = SystemTime; + + fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("a timestamp") + } + + fn visit_str(self, v: &str) -> Result { + ::humantime::parse_rfc3339_weak(v) + .map_err(|_e| E::invalid_value(serde::de::Unexpected::Str(v), &self)) + } + } + + d.deserialize_str(V).map(Serde) + } + } + + impl<'de> Deserialize<'de> for Serde> { + fn deserialize>(d: D) -> Result>, D::Error> { + match Option::>::deserialize(d)? { + Some(Serde(v)) => Ok(Serde(Some(v))), + None => Ok(Serde(None)), + } + } + } + + impl<'de> Deserialize<'de> for Serde> { + fn deserialize>(d: D) -> Result>, D::Error> { + match Option::>::deserialize(d)? 
{ + Some(Serde(v)) => Ok(Serde(Some(v))), + None => Ok(Serde(None)), + } + } + } + + impl<'a> Serialize for Serde<&'a Duration> { + fn serialize(&self, serializer: S) -> Result { + serializer.collect_str(&::humantime::format_duration(*self.0)) + } + } + + impl Serialize for Serde { + fn serialize(&self, serializer: S) -> Result { + serializer.collect_str(&::humantime::format_duration(self.0)) + } + } + + impl<'a> Serialize for Serde<&'a SystemTime> { + fn serialize(&self, serializer: S) -> Result { + serializer.collect_str(&::humantime::format_rfc3339(*self.0)) + } + } + + impl Serialize for Serde { + fn serialize(&self, serializer: S) -> Result { + ::humantime::format_rfc3339(self.0) + .to_string() + .serialize(serializer) + } + } + + impl<'a> Serialize for Serde<&'a Option> { + fn serialize(&self, serializer: S) -> Result { + match *self.0 { + Some(v) => serializer.serialize_some(&Serde(v)), + None => serializer.serialize_none(), + } + } + } + + impl Serialize for Serde> { + fn serialize(&self, serializer: S) -> Result { + Serde(&self.0).serialize(serializer) + } + } + + impl<'a> Serialize for Serde<&'a Option> { + fn serialize(&self, serializer: S) -> Result { + match *self.0 { + Some(v) => serializer.serialize_some(&Serde(v)), + None => serializer.serialize_none(), + } + } + } + + impl Serialize for Serde> { + fn serialize(&self, serializer: S) -> Result { + Serde(&self.0).serialize(serializer) + } + } +} + +#[derive(Deserialize)] +#[repr(transparent)] +pub struct BorrowedStr<'a>(#[serde(borrow)] pub Cow<'a, str>); + +pub struct StrVisitor(PhantomData); + +impl StrVisitor { + pub const fn new() -> Self { + Self(PhantomData) + } +} + +impl<'de, S: FromStr> Visitor<'de> for StrVisitor +where + ::Err: std::fmt::Display, +{ + type Value = S; + + fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "a string") + } + + fn visit_str(self, value: &str) -> Result { + value.parse::().map_err(Error::custom) + } +} + +pub struct BytesVisitor; + +impl<'de, const M: usize> Visitor<'de> for BytesVisitor { + type Value = [u8; M]; + + fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!("a byte array of size {M}")) + } + + fn visit_bytes(self, v: &[u8]) -> Result { + v.try_into() + .map_err(|_e| Error::invalid_length(v.len(), &self)) + } + + fn visit_seq(self, seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + struct SeqIter<'de, A, T> { + access: A, + marker: PhantomData<(&'de (), T)>, + } + + impl<'de, A, T> SeqIter<'de, A, T> { + pub(crate) fn new(access: A) -> Self + where + A: serde::de::SeqAccess<'de>, + { + Self { + access, + marker: PhantomData, + } + } + } + + impl<'de, A, T> Iterator for SeqIter<'de, A, T> + where + A: serde::de::SeqAccess<'de>, + T: Deserialize<'de>, + { + type Item = Result; + + fn next(&mut self) -> Option { + self.access.next_element().transpose() + } + + fn size_hint(&self) -> (usize, Option) { + match self.access.size_hint() { + Some(size) => (size, Some(size)), + None => (0, None), + } + } + } + + fn array_from_iterator( + mut iter: I, + expected: &dyn Expected, + ) -> Result<[T; N], E> + where + I: Iterator>, + E: Error, + { + use core::mem::MaybeUninit; + + /// # Safety + /// The following must be true: + /// - The first `num` elements must be initialized. 
+ unsafe fn drop_array_elems( + num: usize, + mut arr: [MaybeUninit; N], + ) { + arr[..num] + .iter_mut() + .for_each(|item| item.assume_init_drop()); + } + + // SAFETY: It is safe to assume that array of uninitialized values is initialized itself. + let mut arr: [MaybeUninit; N] = unsafe { MaybeUninit::uninit().assume_init() }; + + // NOTE: Leaks memory on panic + for (i, elem) in arr[..].iter_mut().enumerate() { + *elem = match iter.next() { + Some(Ok(value)) => MaybeUninit::new(value), + Some(Err(err)) => { + // SAFETY: Items until `i` were initialized. + unsafe { drop_array_elems(i, arr) }; + return Err(err); + } + None => { + // SAFETY: Items until `i` were initialized. + unsafe { drop_array_elems(i, arr) }; + return Err(Error::invalid_length(i, expected)); + } + }; + } + + // Everything is initialized. Transmute the array to the initialized type. + // A normal transmute is not possible because of: + // https://github.com/rust-lang/rust/issues/61956 + Ok(unsafe { std::mem::transmute_copy(&arr) }) + } + + array_from_iterator(SeqIter::new(seq), &self) + } +} From 8b0eb6c375be0a3bf6a6d622b920a0d7be5ee306 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Fri, 16 Feb 2024 15:40:29 +0100 Subject: [PATCH 29/35] fix(network): Fix example name --- network/Cargo.toml | 4 ++-- network/examples/{simple.rs => network_node.rs} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename network/examples/{simple.rs => network_node.rs} (100%) diff --git a/network/Cargo.toml b/network/Cargo.toml index e1ab4b61c..a9f25cfc4 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -6,8 +6,8 @@ description = "A peer-to-peer networking library." include = ["src/**/*.rs", "src/**/*.tl"] [[example]] -name = "simple" -path = "examples/simple.rs" +name = "network-node" +path = "examples/network_node.rs" [dependencies] # crates.io deps diff --git a/network/examples/simple.rs b/network/examples/network_node.rs similarity index 100% rename from network/examples/simple.rs rename to network/examples/network_node.rs From d80490f456a154e7a0d9fe7bb57a4f3623cf90b1 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Fri, 16 Feb 2024 15:58:06 +0100 Subject: [PATCH 30/35] feat(network): Extend DHT config --- network/src/dht/config.rs | 39 ++++++++++++++++ network/src/dht/mod.rs | 94 +++++++++++++++++++++------------------ 2 files changed, 89 insertions(+), 44 deletions(-) diff --git a/network/src/dht/config.rs b/network/src/dht/config.rs index ee81eb59e..3a7f9aac7 100644 --- a/network/src/dht/config.rs +++ b/network/src/dht/config.rs @@ -11,25 +11,59 @@ pub struct DhtConfig { /// /// Default: 6. pub max_k: usize, + /// Maximum time to live for node info. /// /// Default: 1 hour. #[serde(with = "serde_helpers::humantime")] pub max_node_info_ttl: Duration, + /// Maximum time to live for stored values. /// /// Default: 1 hour. #[serde(with = "serde_helpers::humantime")] pub max_stored_value_ttl: Duration, + /// Maximum storage capacity (number of entries). /// /// Default: 10000. pub max_storage_capacity: u64, + /// Time until a stored item is considered idle and can be removed. /// /// Default: unlimited. #[serde(with = "serde_helpers::humantime")] pub storage_item_time_to_idle: Option, + + /// A period of refreshing the local node info. + /// + /// Default: 1 minute. + #[serde(with = "serde_helpers::humantime")] + pub local_info_refresh_period: Duration, + + /// A period of storing the local node info into the DHT. + /// + /// Default: 10 minutes. 
+ #[serde(with = "serde_helpers::humantime")] + pub local_info_announce_period: Duration, + + /// A maximum value of a random jitter for the peer announce period. + /// + /// Default: 1 minute. + #[serde(with = "serde_helpers::humantime")] + pub max_local_info_announce_period_jitter: Duration, + + /// A period of updating and populating the routing table. + /// + /// Default: 10 minutes. + #[serde(with = "serde_helpers::humantime")] + pub populate_period: Duration, + + /// A maximum value of a random jitter for the populate period. + /// + /// Default: 1 minutes. + #[serde(with = "serde_helpers::humantime")] + pub max_populate_period_jitter: Duration, } impl Default for DhtConfig { @@ -40,6 +74,11 @@ impl Default for DhtConfig { max_stored_value_ttl: Duration::from_secs(3600), max_storage_capacity: 10000, storage_item_time_to_idle: None, + local_info_refresh_period: Duration::from_secs(60), + local_info_announce_period: Duration::from_secs(600), + max_local_info_announce_period_jitter: Duration::from_secs(60), + populate_period: Duration::from_secs(600), + max_populate_period_jitter: Duration::from_secs(60), } } } diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 691b44bc3..512afd1ff 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -1,6 +1,5 @@ use std::collections::hash_map; use std::sync::{Arc, Mutex}; -use std::time::Duration; use anyhow::Result; use bytes::{Buf, Bytes}; @@ -158,7 +157,7 @@ impl<'a> DhtQueryBuilder<'a> { inner: *self, data: tl_proto::serialize(&data), at: None, - ttl: DEFAULT_TTL, + ttl: self.inner.config.max_stored_value_ttl.as_secs() as _, } } } @@ -275,8 +274,7 @@ impl DhtServiceBuilder { routing_table: Mutex::new(RoutingTable::new(self.local_id)), storage, node_info: Mutex::new(None), - max_k: config.max_k, - node_ttl: config.max_node_info_ttl, + config, }); let client_builder = DhtClientBuilder { @@ -308,10 +306,10 @@ impl Service for DhtService { type OnDatagramFuture = futures_util::future::Ready<()>; #[tracing::instrument( - level = "debug", - name = "on_dht_query", - skip_all, - fields(peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address) + level = "debug", + name = "on_dht_query", + skip_all, + fields(peer_id = % req.metadata.peer_id, addr = % req.metadata.remote_address) )] fn on_query(&self, req: ServiceRequest) -> Self::OnQueryFuture { let response = crate::match_tl_request!(req.body, { @@ -344,10 +342,10 @@ impl Service for DhtService { } #[tracing::instrument( - level = "debug", - name = "on_dht_message", - skip_all, - fields(peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address) + level = "debug", + name = "on_dht_message", + skip_all, + fields(peer_id = % req.metadata.peer_id, addr = % req.metadata.remote_address) )] fn on_message(&self, req: ServiceRequest) -> Self::OnMessageFuture { crate::match_tl_request!(req.body, { @@ -390,31 +388,30 @@ struct DhtInner { routing_table: Mutex, storage: Storage, node_info: Mutex>, - max_k: usize, - node_ttl: Duration, + config: DhtConfig, } impl DhtInner { fn start_background_tasks(self: &Arc, network: WeakNetwork) { - const INFO_UPDATE_PERIOD: Duration = Duration::from_secs(60); - - const ANNOUNCE_PERIOD: Duration = Duration::from_secs(600); - const ANNOUNCE_SHIFT: Duration = Duration::from_secs(60); - const POPULATE_PERIOD: Duration = Duration::from_secs(600); - const POPULATE_SHIFT: Duration = Duration::from_secs(10); - enum Action { Refresh, Announce, Populate, } + let mut refresh_interval = 
tokio::time::interval(self.config.local_info_refresh_period); + let mut announce_interval = shifted_interval( + self.config.local_info_announce_period, + self.config.max_local_info_announce_period_jitter, + ); + let mut populate_interval = shifted_interval( + self.config.populate_period, + self.config.max_populate_period_jitter, + ); + let this = Arc::downgrade(self); tokio::spawn(async move { tracing::debug!("background DHT loop started"); - let mut refresh_interval = tokio::time::interval(INFO_UPDATE_PERIOD); - let mut announce_interval = shifted_interval(ANNOUNCE_PERIOD, ANNOUNCE_SHIFT); - let mut populate_interval = shifted_interval(POPULATE_PERIOD, POPULATE_SHIFT); let mut prev_populate_fut = None::>; loop { @@ -430,14 +427,14 @@ impl DhtInner { match action { Action::Refresh => { - this.refresh_local_node_info(&network, DEFAULT_TTL); + this.refresh_local_node_info(&network); } Action::Announce => { // Always refresh node info before announcing - this.refresh_local_node_info(&network, DEFAULT_TTL); + this.refresh_local_node_info(&network); refresh_interval.reset(); - if let Err(e) = this.announce_local_node_info(&network, DEFAULT_TTL).await { + if let Err(e) = this.announce_local_node_info(&network).await { tracing::error!("failed to announce local DHT node info: {e:?}"); } } @@ -460,13 +457,13 @@ impl DhtInner { }); } - fn refresh_local_node_info(&self, network: &Network, ttl: u32) { + fn refresh_local_node_info(&self, network: &Network) { let now = now_sec(); let mut node_info = PeerInfo { id: self.local_id, address_list: vec![network.local_addr().into()].into_boxed_slice(), created_at: now, - expires_at: now + ttl, + expires_at: now + self.config.max_node_info_ttl.as_secs() as u32, signature: Box::new([0; 64]), }; *node_info.signature = network.sign_tl(&node_info); @@ -474,19 +471,22 @@ impl DhtInner { *self.node_info.lock().unwrap() = Some(node_info); } - #[tracing::instrument(level = "debug", skip_all, fields(local_id = %self.local_id))] - async fn announce_local_node_info(&self, network: &Network, ttl: u32) -> Result<()> { + #[tracing::instrument(level = "debug", skip_all, fields(local_id = % self.local_id))] + async fn announce_local_node_info(&self, network: &Network) -> Result<()> { let data = tl_proto::serialize(&[network.local_addr().into()] as &[Address]); - let mut value = - self.make_unsigned_peer_value(PeerValueKeyName::NodeInfo, &data, now_sec() + ttl); + let mut value = self.make_unsigned_peer_value( + PeerValueKeyName::NodeInfo, + &data, + now_sec() + self.config.max_node_info_ttl.as_secs() as u32, + ); let signature = network.sign_tl(&value); value.signature = &signature; self.store_value(network, ValueRef::Peer(value)).await } - #[tracing::instrument(level = "debug", skip_all, fields(local_id = %self.local_id))] + #[tracing::instrument(level = "debug", skip_all, fields(local_id = % self.local_id))] async fn populate(&self, network: &Network) { const PARALLEL_QUERIES: usize = 3; const MAX_DISTANCE: usize = 15; @@ -510,13 +510,13 @@ impl DhtInner { .skip_while(|(_, bucket)| bucket.is_empty()); for (&distance, _) in first_n_non_empty_buckets { - // Query K closest nodes for a random ID at the specified distance from the local ID. + // Query the K closest nodes for a random ID at the specified distance from the local ID. 
let random_id = random_key_at_distance(&routing_table.local_id, distance, rng); let query = Query::new( network.clone(), &routing_table, random_id.as_bytes(), - self.max_k, + self.config.max_k, ); futures.push(async { @@ -557,7 +557,11 @@ impl DhtInner { continue; } - let is_new = routing_table.add(peer.clone(), self.max_k, &self.node_ttl); + let is_new = routing_table.add( + peer.clone(), + self.config.max_k, + &self.config.max_node_info_ttl, + ); if is_new { network.known_peers().insert(peer, PeerAffinity::Allowed); count += 1; @@ -573,7 +577,7 @@ impl DhtInner { network.clone(), &self.routing_table.lock().unwrap(), key_hash, - self.max_k, + self.config.max_k, ); // NOTE: expression is intentionally split to drop the routing table guard @@ -587,7 +591,7 @@ impl DhtInner { network.clone(), &self.routing_table.lock().unwrap(), value, - self.max_k, + self.config.max_k, ); // NOTE: expression is intentionally split to drop the routing table guard @@ -603,7 +607,11 @@ impl DhtInner { } let mut routing_table = self.routing_table.lock().unwrap(); - let is_new = routing_table.add(node.clone(), self.max_k, &self.node_ttl); + let is_new = routing_table.add( + node.clone(), + self.config.max_k, + &self.config.max_node_info_ttl, + ); if is_new { network.known_peers().insert(node, PeerAffinity::Allowed); } @@ -636,7 +644,7 @@ impl DhtInner { .routing_table .lock() .unwrap() - .closest(&req.key, (req.k as usize).min(self.max_k)); + .closest(&req.key, (req.k as usize).min(self.config.max_k)); NodeResponse { nodes } } @@ -649,7 +657,7 @@ impl DhtInner { .routing_table .lock() .unwrap() - .closest(&req.key, (req.k as usize).min(self.max_k)); + .closest(&req.key, (req.k as usize).min(self.config.max_k)); ValueResponseRaw::NotFound(nodes) } @@ -692,5 +700,3 @@ pub enum FindValueError { #[error("value not found")] NotFound, } - -const DEFAULT_TTL: u32 = 3600; From 77afd270ac49ea03ade0504725fcb2002e0dfa2d Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Fri, 16 Feb 2024 18:45:43 +0100 Subject: [PATCH 31/35] feat(network): Improve expired dht peers eviction --- network/src/dht/config.rs | 10 ++-- network/src/dht/mod.rs | 65 ++++++++++++----------- network/src/dht/routing.rs | 45 ++++------------ network/src/network/connection_manager.rs | 32 ++++++++--- 4 files changed, 76 insertions(+), 76 deletions(-) diff --git a/network/src/dht/config.rs b/network/src/dht/config.rs index 3a7f9aac7..57b47fae0 100644 --- a/network/src/dht/config.rs +++ b/network/src/dht/config.rs @@ -57,13 +57,13 @@ pub struct DhtConfig { /// /// Default: 10 minutes. #[serde(with = "serde_helpers::humantime")] - pub populate_period: Duration, + pub routing_table_refresh_period: Duration, - /// A maximum value of a random jitter for the populate period. + /// A maximum value of a random jitter for the routing table refresh period. /// /// Default: 1 minutes. 
#[serde(with = "serde_helpers::humantime")] - pub max_populate_period_jitter: Duration, + pub max_routing_table_refresh_period_jitter: Duration, } impl Default for DhtConfig { @@ -77,8 +77,8 @@ impl Default for DhtConfig { local_info_refresh_period: Duration::from_secs(60), local_info_announce_period: Duration::from_secs(600), max_local_info_announce_period_jitter: Duration::from_secs(60), - populate_period: Duration::from_secs(600), - max_populate_period_jitter: Duration::from_secs(60), + routing_table_refresh_period: Duration::from_secs(600), + max_routing_table_refresh_period_jitter: Duration::from_secs(60), } } } diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 512afd1ff..28fc225dc 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -394,31 +394,32 @@ struct DhtInner { impl DhtInner { fn start_background_tasks(self: &Arc, network: WeakNetwork) { enum Action { - Refresh, - Announce, - Populate, + RefreshLocalNodeInfo, + AnnounceLocalNodeInfo, + RefreshRoutingTable, } - let mut refresh_interval = tokio::time::interval(self.config.local_info_refresh_period); - let mut announce_interval = shifted_interval( + let mut refresh_node_info_interval = + tokio::time::interval(self.config.local_info_refresh_period); + let mut announce_node_info_interval = shifted_interval( self.config.local_info_announce_period, self.config.max_local_info_announce_period_jitter, ); - let mut populate_interval = shifted_interval( - self.config.populate_period, - self.config.max_populate_period_jitter, + let mut refresh_routing_table_interval = shifted_interval( + self.config.routing_table_refresh_period, + self.config.max_routing_table_refresh_period_jitter, ); let this = Arc::downgrade(self); tokio::spawn(async move { tracing::debug!("background DHT loop started"); - let mut prev_populate_fut = None::>; + let mut prev_refresh_routing_table_fut = None::>; loop { let action = tokio::select! 
{ - _ = refresh_interval.tick() => Action::Refresh, - _ = announce_interval.tick() => Action::Announce, - _ = populate_interval.tick() => Action::Populate, + _ = refresh_node_info_interval.tick() => Action::RefreshLocalNodeInfo, + _ = announce_node_info_interval.tick() => Action::AnnounceLocalNodeInfo, + _ = refresh_routing_table_interval.tick() => Action::RefreshRoutingTable, }; let (Some(this), Some(network)) = (this.upgrade(), network.upgrade()) else { @@ -426,20 +427,20 @@ impl DhtInner { }; match action { - Action::Refresh => { + Action::RefreshLocalNodeInfo => { this.refresh_local_node_info(&network); } - Action::Announce => { + Action::AnnounceLocalNodeInfo => { // Always refresh node info before announcing this.refresh_local_node_info(&network); - refresh_interval.reset(); + refresh_node_info_interval.reset(); if let Err(e) = this.announce_local_node_info(&network).await { tracing::error!("failed to announce local DHT node info: {e:?}"); } } - Action::Populate => { - if let Some(fut) = prev_populate_fut.take() { + Action::RefreshRoutingTable => { + if let Some(fut) = prev_refresh_routing_table_fut.take() { if let Err(e) = fut.await { if e.is_panic() { std::panic::resume_unwind(e.into_panic()); @@ -447,8 +448,8 @@ impl DhtInner { } } - prev_populate_fut = Some(tokio::spawn(async move { - this.populate(&network).await; + prev_refresh_routing_table_fut = Some(tokio::spawn(async move { + this.refresh_routing_table(&network).await; })); } } @@ -487,7 +488,7 @@ impl DhtInner { } #[tracing::instrument(level = "debug", skip_all, fields(local_id = % self.local_id))] - async fn populate(&self, network: &Network) { + async fn refresh_routing_table(&self, network: &Network) { const PARALLEL_QUERIES: usize = 3; const MAX_DISTANCE: usize = 15; const QUERY_DEPTH: usize = 3; @@ -496,20 +497,24 @@ impl DhtInner { let semaphore = Semaphore::new(PARALLEL_QUERIES); let mut futures = FuturesUnordered::new(); { - // NOTE: rng is intentionally dropped after this block to make this future `Send`. let rng = &mut rand::thread_rng(); - let routing_table = self.routing_table.lock().unwrap(); + let mut routing_table = self.routing_table.lock().unwrap(); + + // Filter out expired nodes + let now = now_sec(); + for (_, bucket) in routing_table.buckets.range_mut(..=MAX_DISTANCE) { + bucket.retain_nodes(|node| !node.is_expired(now, &self.config.max_node_info_ttl)); + } // Iterate over the first buckets up until some distance (`MAX_DISTANCE`) - // or up to the last non-empty bucket. - let first_n_non_empty_buckets = routing_table - .buckets - .range(..=MAX_DISTANCE) - .rev() - .skip_while(|(_, bucket)| bucket.is_empty()); - - for (&distance, _) in first_n_non_empty_buckets { + // or up to the last non-empty bucket (?). + for (&distance, bucket) in routing_table.buckets.range(..=MAX_DISTANCE).rev() { + // TODO: Should we skip empty buckets? + if bucket.is_empty() { + continue; + } + // Query the K closest nodes for a random ID at the specified distance from the local ID. 
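+                // One lookup per populated bucket: generating a key at exactly this
+                // XOR distance from the local id steers the iterative lookup towards
+                // peers that belong in that bucket, which is what keeps the whole
+                // routing table fresh.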
let random_id = random_key_at_distance(&routing_table.local_id, distance, rng); let query = Query::new( diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index cb81f11ca..abc0969b7 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -2,6 +2,8 @@ use std::collections::{BTreeMap, VecDeque}; use std::sync::Arc; use std::time::{Duration, Instant}; +use tycho_util::time::now_sec; + use crate::dht::{xor_distance, MAX_XOR_DISTANCE}; use crate::types::{PeerId, PeerInfo}; @@ -40,16 +42,6 @@ impl RoutingTable { .insert(peer, max_k, node_ttl) } - #[allow(unused)] - pub fn remove(&mut self, key: &PeerId) -> bool { - let distance = xor_distance(&self.local_id, key); - if let Some(bucket) = self.buckets.get_mut(&distance) { - bucket.remove(key) - } else { - false - } - } - pub fn closest(&self, key: &[u8; 32], count: usize) -> Vec> { if count == 0 { return Vec::new(); @@ -103,15 +95,6 @@ impl RoutingTable { } } } - - #[allow(unused)] - pub fn contains(&self, key: &PeerId) -> bool { - let distance = xor_distance(&self.local_id, key); - self.buckets - .get(&distance) - .map(|bucket| bucket.contains(key)) - .unwrap_or_default() - } } pub(crate) struct Bucket { @@ -133,7 +116,7 @@ impl Bucket { { self.nodes.remove(index); } else if self.nodes.len() >= max_k { - if matches!(self.nodes.front(), Some(node) if node.is_expired(timeout)) { + if matches!(self.nodes.front(), Some(node) if node.is_expired(now_sec(), timeout)) { self.nodes.pop_front(); } else { return false; @@ -144,17 +127,11 @@ impl Bucket { true } - pub fn remove(&mut self, key: &PeerId) -> bool { - if let Some(index) = self.nodes.iter().position(|node| &node.data.id == key) { - self.nodes.remove(index); - true - } else { - false - } - } - - pub fn contains(&self, key: &PeerId) -> bool { - self.nodes.iter().any(|node| &node.data.id == key) + pub fn retain_nodes(&mut self, f: F) + where + F: FnMut(&Node) -> bool, + { + self.nodes.retain(f) } pub fn is_empty(&self) -> bool { @@ -175,8 +152,8 @@ impl Node { } } - pub fn is_expired(&self, timeout: &Duration) -> bool { - &self.last_updated_at.elapsed() >= timeout + pub fn is_expired(&self, at: u32, timeout: &Duration) -> bool { + self.data.is_expired(at) || &self.last_updated_at.elapsed() >= timeout } } @@ -193,7 +170,7 @@ mod tests { id, address_list: Default::default(), created_at: 0, - expires_at: 0, + expires_at: u32::MAX, signature: Box::new([0; 64]), }) } diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index df9692430..cfbe2cd75 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -581,13 +581,31 @@ impl KnownPeers { } pub fn insert(&self, peer_info: Arc, affinity: PeerAffinity) -> Option { - self.0.insert( - peer_info.id, - KnownPeer { - peer_info, - affinity, - }, - ) + match self.0.entry(peer_info.id) { + dashmap::mapref::entry::Entry::Vacant(entry) => { + entry.insert(KnownPeer { + peer_info, + affinity, + }); + None + } + dashmap::mapref::entry::Entry::Occupied(entry) => { + if entry.get().peer_info.created_at >= peer_info.created_at { + return None; + } + + let affinity = match affinity { + PeerAffinity::High | PeerAffinity::Never => affinity, + PeerAffinity::Allowed => entry.get().affinity, + }; + + let (_, old) = entry.replace_entry(KnownPeer { + peer_info, + affinity, + }); + Some(old) + } + } } pub fn remove(&self, peer_id: &PeerId) -> Option { From d1be28761f9aa7828c0515007ae3b132b8407c7f Mon Sep 17 00:00:00 2001 From: Vladimir 
Petrzhikovskii Date: Fri, 16 Feb 2024 15:45:26 +0100 Subject: [PATCH 32/35] ci(network) add docker compose for newtork tests --- .dockerignore | 12 ++++++ .gitignore | 3 +- network.Dockerfile | 7 +++ scripts/test_dht.py | 103 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 124 insertions(+), 1 deletion(-) create mode 100644 .dockerignore create mode 100644 network.Dockerfile create mode 100755 scripts/test_dht.py diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..e039b6afe --- /dev/null +++ b/.dockerignore @@ -0,0 +1,12 @@ +debug/ +target/ +**/*.rs.bk +*.pdb + +.idea/ +.vscode/ +.fleet/ + +perf.data* + +.scratch \ No newline at end of file diff --git a/.gitignore b/.gitignore index 004ef028a..ca739ab20 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ target/ .vscode/ .fleet/ -perf.data* \ No newline at end of file +perf.data* +.scratch \ No newline at end of file diff --git a/network.Dockerfile b/network.Dockerfile new file mode 100644 index 000000000..df5d53bc9 --- /dev/null +++ b/network.Dockerfile @@ -0,0 +1,7 @@ +FROM rust:1.76-buster as builder +COPY . . +RUN cargo build --release --example network-node + +FROM debian:buster-slim +RUN mkdir /app +COPY --from=builder /target/release/examples/network-node /app/network-node \ No newline at end of file diff --git a/scripts/test_dht.py b/scripts/test_dht.py new file mode 100755 index 000000000..a4b7cb654 --- /dev/null +++ b/scripts/test_dht.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +import yaml +import time +import subprocess +import os + + +def generate_entrypoint_script(service_name, start_delay, params: str = ""): + script_content = f"""#!/bin/bash + # Introduce startup delay + sleep {start_delay} + /app/network-node {params} + """ + script_path = f".scratch/entrypoints/{service_name}_entrypoint.sh" + os.makedirs(os.path.dirname(script_path), exist_ok=True) + with open(script_path, "w") as file: + file.write(script_content) + os.chmod(script_path, 0o755) # Make the script executable + + +def generate_docker_compose(services): + """ + Generates a Docker Compose file with specified services, IPs, and entrypoints. + """ + compose_dict = {"version": "3.7", "services": {}} + + for service, details in services.items(): + # Generate entrypoint script for each service + generate_entrypoint_script( + service, details.get("start_delay", 0), details.get("latency", 0) + ) + + compose_dict["services"][service] = { + "image": details["image"], + "entrypoint": f"/entrypoints/{service}_entrypoint.sh", + "volumes": [ + f"./entrypoints/{service}_entrypoint.sh:/entrypoints/{service}_entrypoint.sh" + ], + "networks": {"default": {"ipv4_address": details["ip"]}}, + } + + networks_dict = { + "networks": {"default": {"ipam": {"config": [{"subnet": "172.20.0.0/16"}]}}} + } + + compose_dict.update(networks_dict) + + with open(".scratch/docker-compose.yml", "w") as file: + yaml.dump(compose_dict, file) + + print("Docker Compose file and entrypoint scripts generated.") + + +def run_docker_compose(services): + """ + Runs the Docker Compose file and applies the specified start delays. 
+ """ + os.system("docker compose up -f .scratch/docker-compose.yml -d") + + # for service, details in services.items(): + # latency = details.get("latency", 0) + # if latency: + # print(f"Applying {latency}ms latency to {service}...") + # # Assuming eth0 as the default network interface inside the container + # container_id = ( + # subprocess.check_output(["docker", "ps", "-qf", f"name={service}"]) + # .decode() + # .strip() + # ) + # os.system( + # f"docker exec {container_id} tc qdisc add dev eth0 root netem delay {latency}ms" + # ) + + print("Docker Compose services started with specified delays and latencies.") + + +def main(): + # Example input + services = { + "node-1": { + "image": "tycho-network", + "ip": "172.20.0.2", + "start_delay": 5, + "latency": 100, + }, + "node-2": { + "image": "tycho-network", + "ip": "172.20.0.3", + "start_delay": 10, + "latency": 50, + }, + } + + generate_entrypoint_script("node-1", 5, "--help") + generate_entrypoint_script("node-2", 10, "--help") + generate_docker_compose(services) + print("To manually test the setup, run the following commands:") + print("docker-compose up -f .scratch/docker-compose.yml -d") + run_docker_compose(services) + + +if __name__ == "__main__": + main() From 723ac9d0026819598395efa750193b9f622899f7 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Fri, 16 Feb 2024 20:51:09 +0100 Subject: [PATCH 33/35] ci(network): Fix simulator script --- scripts/test_dht.py | 67 +++++++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 26 deletions(-) diff --git a/scripts/test_dht.py b/scripts/test_dht.py index a4b7cb654..26a7f4e08 100755 --- a/scripts/test_dht.py +++ b/scripts/test_dht.py @@ -1,14 +1,15 @@ #!/usr/bin/env python3 import yaml -import time import subprocess import os +import json def generate_entrypoint_script(service_name, start_delay, params: str = ""): script_content = f"""#!/bin/bash # Introduce startup delay sleep {start_delay} + export RUST_LOG="info,tycho_network=trace" /app/network-node {params} """ script_path = f".scratch/entrypoints/{service_name}_entrypoint.sh" @@ -25,22 +26,18 @@ def generate_docker_compose(services): compose_dict = {"version": "3.7", "services": {}} for service, details in services.items(): - # Generate entrypoint script for each service - generate_entrypoint_script( - service, details.get("start_delay", 0), details.get("latency", 0) - ) - compose_dict["services"][service] = { "image": details["image"], - "entrypoint": f"/entrypoints/{service}_entrypoint.sh", + "entrypoint": "/entrypoints/entrypoint.sh", "volumes": [ - f"./entrypoints/{service}_entrypoint.sh:/entrypoints/{service}_entrypoint.sh" + f"./entrypoints/{service}_entrypoint.sh:/entrypoints/entrypoint.sh", + "./global-config.json:/app/global-config.json", ], "networks": {"default": {"ipv4_address": details["ip"]}}, } networks_dict = { - "networks": {"default": {"ipam": {"config": [{"subnet": "172.20.0.0/16"}]}}} + "networks": {"default": {"ipam": {"config": [{"subnet": "172.30.0.0/24"}]}}} } compose_dict.update(networks_dict) @@ -51,11 +48,16 @@ def generate_docker_compose(services): print("Docker Compose file and entrypoint scripts generated.") +def execute_command(command): + result = subprocess.run(command, shell=True, capture_output=True, text=True) + return result.stdout + + def run_docker_compose(services): """ Runs the Docker Compose file and applies the specified start delays. 
""" - os.system("docker compose up -f .scratch/docker-compose.yml -d") + os.system("docker compose -f .scratch/docker-compose.yml up") # for service, details in services.items(): # latency = details.get("latency", 0) @@ -76,26 +78,39 @@ def run_docker_compose(services): def main(): # Example input - services = { - "node-1": { - "image": "tycho-network", - "ip": "172.20.0.2", - "start_delay": 5, - "latency": 100, - }, - "node-2": { + node_count = 5 + node_port = 25565 + + services = {} + bootstrap_peers = [] + + for i in range(node_count): + key = os.urandom(32).hex() + ip = f"172.30.0.{i + 10}" + + cmd = ( + f"cargo run --example network-node -- gendht '{ip}:{node_port}' --key {key}" + ) + dht_entry = json.loads(execute_command(cmd)) + bootstrap_peers.append(dht_entry) + + node_name = f"node-{i}" + services[node_name] = { "image": "tycho-network", - "ip": "172.20.0.3", - "start_delay": 10, - "latency": 50, - }, - } + "ip": ip, + } + generate_entrypoint_script( + node_name, + start_delay=0, + params=f"run '{ip}:{node_port}' --key {key} --global-config /app/global-config.json", + ) + + with open(".scratch/global-config.json", "w") as f: + json.dump({"bootstrap_peers": bootstrap_peers}, f, indent=2) - generate_entrypoint_script("node-1", 5, "--help") - generate_entrypoint_script("node-2", 10, "--help") generate_docker_compose(services) print("To manually test the setup, run the following commands:") - print("docker-compose up -f .scratch/docker-compose.yml -d") + print("docker compose -f .scratch/docker-compose.yml up") run_docker_compose(services) From 0f15a972ebe14e87a44e31849a3c79e65ee2c76b Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 19 Feb 2024 18:40:37 +0100 Subject: [PATCH 34/35] feat(network): allow announcing peer info with dht messages and queries --- network/src/dht/config.rs | 16 ++- network/src/dht/mod.rs | 215 +++++++++++++++++++++++++++---------- network/src/dht/query.rs | 23 +++- network/src/dht/routing.rs | 76 +++++++++++-- network/src/proto.tl | 7 ++ network/src/proto/dht.rs | 16 +++ network/src/util/mod.rs | 35 +++--- 7 files changed, 297 insertions(+), 91 deletions(-) diff --git a/network/src/dht/config.rs b/network/src/dht/config.rs index 57b47fae0..0552bb5ee 100644 --- a/network/src/dht/config.rs +++ b/network/src/dht/config.rs @@ -12,11 +12,11 @@ pub struct DhtConfig { /// Default: 6. pub max_k: usize, - /// Maximum time to live for node info. + /// Maximum time to live for peer info. /// /// Default: 1 hour. #[serde(with = "serde_helpers::humantime")] - pub max_node_info_ttl: Duration, + pub max_peer_info_ttl: Duration, /// Maximum time to live for stored values. /// @@ -35,13 +35,13 @@ pub struct DhtConfig { #[serde(with = "serde_helpers::humantime")] pub storage_item_time_to_idle: Option, - /// A period of refreshing the local node info. + /// A period of refreshing the local peer info. /// /// Default: 1 minute. #[serde(with = "serde_helpers::humantime")] pub local_info_refresh_period: Duration, - /// A period of storing the local node info into the DHT. + /// A period of storing the local peer info into the DHT. /// /// Default: 10 minutes. #[serde(with = "serde_helpers::humantime")] @@ -64,13 +64,18 @@ pub struct DhtConfig { /// Default: 1 minutes. #[serde(with = "serde_helpers::humantime")] pub max_routing_table_refresh_period_jitter: Duration, + + /// The capacity of the announced peers channel. + /// + /// Default: 10. 
+ pub announced_peers_channel_capacity: usize, } impl Default for DhtConfig { fn default() -> Self { Self { max_k: 6, - max_node_info_ttl: Duration::from_secs(3600), + max_peer_info_ttl: Duration::from_secs(3600), max_stored_value_ttl: Duration::from_secs(3600), max_storage_capacity: 10000, storage_item_time_to_idle: None, @@ -79,6 +84,7 @@ impl Default for DhtConfig { max_local_info_announce_period_jitter: Duration::from_secs(60), routing_table_refresh_period: Duration::from_secs(600), max_routing_table_refresh_period_jitter: Duration::from_secs(60), + announced_peers_channel_capacity: 10, } } } diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 28fc225dc..bcafb37c3 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -6,13 +6,14 @@ use bytes::{Buf, Bytes}; use futures_util::stream::FuturesUnordered; use futures_util::StreamExt; use rand::RngCore; -use tokio::sync::Semaphore; +use tl_proto::TlRead; +use tokio::sync::{broadcast, Semaphore}; use tokio::task::JoinHandle; use tycho_util::realloc_box_enum; use tycho_util::time::{now_sec, shifted_interval}; use self::query::{Query, StoreValue}; -use self::routing::RoutingTable; +use self::routing::{RoutingTable, RoutingTableSource}; use self::storage::Storage; use crate::network::{Network, WeakNetwork}; use crate::proto::dht::{ @@ -68,7 +69,8 @@ impl DhtClient { } pub fn add_peer(&self, peer: Arc) -> Result { - self.inner.add_node_info(&self.network, peer) + self.inner + .add_peer_info(&self.network, peer, RoutingTableSource::Trusted) } pub async fn get_node_info(&self, peer_id: &PeerId) -> Result { @@ -158,6 +160,7 @@ impl<'a> DhtQueryBuilder<'a> { data: tl_proto::serialize(&data), at: None, ttl: self.inner.config.max_stored_value_ttl.as_secs() as _, + with_peer_info: false, } } } @@ -167,6 +170,7 @@ pub struct DhtQueryWithDataBuilder<'a> { data: Vec, at: Option, ttl: u32, + with_peer_info: bool, } impl DhtQueryWithDataBuilder<'_> { @@ -180,6 +184,11 @@ impl DhtQueryWithDataBuilder<'_> { self } + pub fn with_peer_info(&mut self, with_peer_info: bool) -> &mut Self { + self.with_peer_info = with_peer_info; + self + } + pub async fn store(&self) -> Result<()> { let dht = self.inner.inner; let network = self.inner.network; @@ -196,7 +205,8 @@ impl DhtQueryWithDataBuilder<'_> { let signature = network.sign_tl(&value); value.signature = &signature; - dht.store_value(network, ValueRef::Peer(value)).await + dht.store_value(network, ValueRef::Peer(value), self.with_peer_info) + .await } pub fn into_signed_value(self) -> PeerValue { @@ -269,12 +279,15 @@ impl DhtServiceBuilder { builder.build() }; + let (announced_peers, _) = broadcast::channel(config.announced_peers_channel_capacity); + let inner = Arc::new(DhtInner { local_id: self.local_id, routing_table: Mutex::new(RoutingTable::new(self.local_id)), storage, - node_info: Mutex::new(None), + local_peer_info: Mutex::new(None), config, + announced_peers, }); let client_builder = DhtClientBuilder { @@ -306,13 +319,21 @@ impl Service for DhtService { type OnDatagramFuture = futures_util::future::Ready<()>; #[tracing::instrument( - level = "debug", - name = "on_dht_query", - skip_all, - fields(peer_id = % req.metadata.peer_id, addr = % req.metadata.remote_address) + level = "debug", + name = "on_dht_query", + skip_all, + fields(peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address) )] fn on_query(&self, req: ServiceRequest) -> Self::OnQueryFuture { - let response = crate::match_tl_request!(req.body, { + let (constructor, body) = match 
self.0.try_handle_prefix(&req) { + Ok(rest) => rest, + Err(e) => { + tracing::debug!("failed to deserialize query: {e:?}"); + return futures_util::future::ready(None); + } + }; + + let response = crate::match_tl_request!(body, tag = constructor, { rpc::FindNode as ref r => { tracing::debug!(key = %PeerId::wrap(&r.key), k = r.k, "find_node"); @@ -331,7 +352,7 @@ impl Service for DhtService { self.0.handle_get_node_info().map(tl_proto::serialize) }, }, e => { - tracing::debug!("failed to deserialize query from: {e:?}"); + tracing::debug!("failed to deserialize query: {e:?}"); None }); @@ -342,13 +363,21 @@ impl Service for DhtService { } #[tracing::instrument( - level = "debug", - name = "on_dht_message", - skip_all, - fields(peer_id = % req.metadata.peer_id, addr = % req.metadata.remote_address) + level = "debug", + name = "on_dht_message", + skip_all, + fields(peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address) )] fn on_message(&self, req: ServiceRequest) -> Self::OnMessageFuture { - crate::match_tl_request!(req.body, { + let (constructor, body) = match self.0.try_handle_prefix(&req) { + Ok(rest) => rest, + Err(e) => { + tracing::debug!("failed to deserialize message: {e:?}"); + return futures_util::future::ready(()); + } + }; + + crate::match_tl_request!(body, tag = constructor, { rpc::StoreRef<'_> as ref r => { tracing::debug!("store"); @@ -357,7 +386,7 @@ impl Service for DhtService { } } }, e => { - tracing::debug!("failed to deserialize message from: {e:?}"); + tracing::debug!("failed to deserialize message: {e:?}"); }); futures_util::future::ready(()) @@ -372,6 +401,7 @@ impl Service for DhtService { impl Routable for DhtService { fn query_ids(&self) -> impl IntoIterator { [ + rpc::WithPeerInfo::TL_ID, rpc::FindNode::TL_ID, rpc::FindValue::TL_ID, rpc::GetNodeInfo::TL_ID, @@ -379,7 +409,7 @@ impl Routable for DhtService { } fn message_ids(&self) -> impl IntoIterator { - [rpc::Store::TL_ID] + [rpc::WithPeerInfo::TL_ID, rpc::Store::TL_ID] } } @@ -387,21 +417,23 @@ struct DhtInner { local_id: PeerId, routing_table: Mutex, storage: Storage, - node_info: Mutex>, + local_peer_info: Mutex>, config: DhtConfig, + announced_peers: broadcast::Sender>, } impl DhtInner { fn start_background_tasks(self: &Arc, network: WeakNetwork) { enum Action { - RefreshLocalNodeInfo, - AnnounceLocalNodeInfo, + RefreshLocalPeerInfo, + AnnounceLocalPeerInfo, RefreshRoutingTable, + AddPeer(Arc), } - let mut refresh_node_info_interval = + let mut refresh_peer_info_interval = tokio::time::interval(self.config.local_info_refresh_period); - let mut announce_node_info_interval = shifted_interval( + let mut announce_peer_info_interval = shifted_interval( self.config.local_info_announce_period, self.config.max_local_info_announce_period_jitter, ); @@ -410,6 +442,8 @@ impl DhtInner { self.config.max_routing_table_refresh_period_jitter, ); + let mut announced_peers = self.announced_peers.subscribe(); + let this = Arc::downgrade(self); tokio::spawn(async move { tracing::debug!("background DHT loop started"); @@ -417,9 +451,13 @@ impl DhtInner { let mut prev_refresh_routing_table_fut = None::>; loop { let action = tokio::select! 
{ - _ = refresh_node_info_interval.tick() => Action::RefreshLocalNodeInfo, - _ = announce_node_info_interval.tick() => Action::AnnounceLocalNodeInfo, + _ = refresh_peer_info_interval.tick() => Action::RefreshLocalPeerInfo, + _ = announce_peer_info_interval.tick() => Action::AnnounceLocalPeerInfo, _ = refresh_routing_table_interval.tick() => Action::RefreshRoutingTable, + peer = announced_peers.recv() => match peer { + Ok(peer) => Action::AddPeer(peer), + Err(_) => continue, + } }; let (Some(this), Some(network)) = (this.upgrade(), network.upgrade()) else { @@ -427,15 +465,15 @@ impl DhtInner { }; match action { - Action::RefreshLocalNodeInfo => { - this.refresh_local_node_info(&network); + Action::RefreshLocalPeerInfo => { + this.refresh_local_peer_info(&network); } - Action::AnnounceLocalNodeInfo => { - // Always refresh node info before announcing - this.refresh_local_node_info(&network); - refresh_node_info_interval.reset(); + Action::AnnounceLocalPeerInfo => { + // Always refresh peer info before announcing + this.refresh_local_peer_info(&network); + refresh_peer_info_interval.reset(); - if let Err(e) = this.announce_local_node_info(&network).await { + if let Err(e) = this.announce_local_peer_info(&network).await { tracing::error!("failed to announce local DHT node info: {e:?}"); } } @@ -452,39 +490,38 @@ impl DhtInner { this.refresh_routing_table(&network).await; })); } + Action::AddPeer(peer_info) => { + tracing::info!(peer_id = %peer_info.id, "received peer info"); + if let Err(e) = + this.add_peer_info(&network, peer_info, RoutingTableSource::Untrusted) + { + tracing::error!("failed to add peer to the routing table: {e:?}"); + } + } } } tracing::debug!("background DHT loop finished"); }); } - fn refresh_local_node_info(&self, network: &Network) { - let now = now_sec(); - let mut node_info = PeerInfo { - id: self.local_id, - address_list: vec![network.local_addr().into()].into_boxed_slice(), - created_at: now, - expires_at: now + self.config.max_node_info_ttl.as_secs() as u32, - signature: Box::new([0; 64]), - }; - *node_info.signature = network.sign_tl(&node_info); - - *self.node_info.lock().unwrap() = Some(node_info); + fn refresh_local_peer_info(&self, network: &Network) { + let peer_info = self.make_local_peer_info(network, now_sec()); + *self.local_peer_info.lock().unwrap() = Some(peer_info); } #[tracing::instrument(level = "debug", skip_all, fields(local_id = % self.local_id))] - async fn announce_local_node_info(&self, network: &Network) -> Result<()> { + async fn announce_local_peer_info(&self, network: &Network) -> Result<()> { let data = tl_proto::serialize(&[network.local_addr().into()] as &[Address]); let mut value = self.make_unsigned_peer_value( PeerValueKeyName::NodeInfo, &data, - now_sec() + self.config.max_node_info_ttl.as_secs() as u32, + now_sec() + self.config.max_peer_info_ttl.as_secs() as u32, ); let signature = network.sign_tl(&value); value.signature = &signature; - self.store_value(network, ValueRef::Peer(value)).await + self.store_value(network, ValueRef::Peer(value), true).await } #[tracing::instrument(level = "debug", skip_all, fields(local_id = % self.local_id))] @@ -504,7 +541,7 @@ impl DhtInner { // Filter out expired nodes let now = now_sec(); for (_, bucket) in routing_table.buckets.range_mut(..=MAX_DISTANCE) { - bucket.retain_nodes(|node| !node.is_expired(now, &self.config.max_node_info_ttl)); + bucket.retain_nodes(|node| !node.is_expired(now, &self.config.max_peer_info_ttl)); } // Iterate over the first buckets up until some distance 
(`MAX_DISTANCE`) @@ -565,7 +602,8 @@ impl DhtInner { let is_new = routing_table.add( peer.clone(), self.config.max_k, - &self.config.max_node_info_ttl, + &self.config.max_peer_info_ttl, + RoutingTableSource::Trusted, ); if is_new { network.known_peers().insert(peer, PeerAffinity::Allowed); @@ -589,14 +627,31 @@ impl DhtInner { query.find_value().await } - async fn store_value(&self, network: &Network, value: ValueRef<'_>) -> Result<()> { + async fn store_value( + &self, + network: &Network, + value: ValueRef<'_>, + with_peer_info: bool, + ) -> Result<()> { self.storage.insert(&value)?; + let local_peer_info = if with_peer_info { + let mut node_info = self.local_peer_info.lock().unwrap(); + Some( + node_info + .get_or_insert_with(|| self.make_local_peer_info(network, now_sec())) + .clone(), + ) + } else { + None + }; + let query = StoreValue::new( network.clone(), &self.routing_table.lock().unwrap(), value, self.config.max_k, + local_peer_info.as_ref(), ); // NOTE: expression is intentionally split to drop the routing table guard @@ -604,21 +659,29 @@ impl DhtInner { Ok(()) } - fn add_node_info(&self, network: &Network, node: Arc) -> Result { - anyhow::ensure!(node.is_valid(now_sec()), "invalid peer node info"); + fn add_peer_info( + &self, + network: &Network, + peer_info: Arc, + source: RoutingTableSource, + ) -> Result { + anyhow::ensure!(peer_info.is_valid(now_sec()), "invalid peer info"); - if node.id == self.local_id { + if peer_info.id == self.local_id { return Ok(false); } let mut routing_table = self.routing_table.lock().unwrap(); let is_new = routing_table.add( - node.clone(), + peer_info.clone(), self.config.max_k, - &self.config.max_node_info_ttl, + &self.config.max_peer_info_ttl, + source, ); if is_new { - network.known_peers().insert(node, PeerAffinity::Allowed); + network + .known_peers() + .insert(peer_info, PeerAffinity::Allowed); } Ok(is_new) } @@ -640,6 +703,44 @@ impl DhtInner { } } + fn make_local_peer_info(&self, network: &Network, now: u32) -> PeerInfo { + let mut peer_info = PeerInfo { + id: self.local_id, + address_list: vec![network.local_addr().into()].into_boxed_slice(), + created_at: now, + expires_at: now + self.config.max_peer_info_ttl.as_secs() as u32, + signature: Box::new([0; 64]), + }; + *peer_info.signature = network.sign_tl(&peer_info); + peer_info + } + + fn try_handle_prefix<'a>(&self, req: &'a ServiceRequest) -> Result<(u32, &'a [u8])> { + let mut body = req.as_ref(); + anyhow::ensure!(body.len() >= 4, tl_proto::TlError::UnexpectedEof); + + // NOTE: read constructor without advancing the body + let mut constructor = std::convert::identity(body).get_u32_le(); + let mut offset = 0; + + if constructor == rpc::WithPeerInfo::TL_ID { + let peer_info = rpc::WithPeerInfo::read_from(body, &mut offset)?.peer_info; + anyhow::ensure!( + peer_info.id == req.metadata.peer_id, + "suggested peer ID does not belong to the sender" + ); + self.announced_peers.send(peer_info).ok(); + + body = &body[offset..]; + anyhow::ensure!(body.len() >= 4, tl_proto::TlError::UnexpectedEof); + + // NOTE: read constructor without advancing the body + constructor = std::convert::identity(body).get_u32_le(); + } + + Ok((constructor, body)) + } + fn handle_store(&self, req: &rpc::StoreRef<'_>) -> Result { self.storage.insert(&req.value) } @@ -669,7 +770,7 @@ impl DhtInner { } fn handle_get_node_info(&self) -> Option { - self.node_info + self.local_peer_info .lock() .unwrap() .clone() diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index 216aae009..5355ba6d1 100644 --- 
a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -11,7 +11,7 @@ use tokio::sync::Semaphore; use tycho_util::time::now_sec; use tycho_util::{FastHashMap, FastHashSet}; -use crate::dht::routing::RoutingTable; +use crate::dht::routing::{RoutingTable, RoutingTableSource}; use crate::network::Network; use crate::proto::dht::{rpc, NodeResponse, Value, ValueRef, ValueResponse}; use crate::types::{PeerId, PeerInfo, Request}; @@ -32,7 +32,12 @@ impl Query { ) -> Self { let mut candidates = RoutingTable::new(PeerId(*target_id)); routing_table.visit_closest(target_id, max_k, |node| { - candidates.add(node.clone(), max_k, &Duration::MAX); + candidates.add( + node.clone(), + max_k, + &Duration::MAX, + RoutingTableSource::Trusted, + ); }); Self { @@ -212,7 +217,8 @@ impl Query { // Insert a new entry if visited.insert(node.id) { - self.candidates.add(node, max_k, &Duration::MAX); + self.candidates + .add(node, max_k, &Duration::MAX, RoutingTableSource::Trusted); has_new = true; } } @@ -238,7 +244,8 @@ impl Query { // Insert a new entry hash_map::Entry::Vacant(entry) => { let node = entry.insert(node).clone(); - self.candidates.add(node, max_k, &Duration::MAX); + self.candidates + .add(node, max_k, &Duration::MAX, RoutingTableSource::Trusted); has_new = true; } // Try to replace an old entry @@ -295,13 +302,19 @@ impl StoreValue<()> { routing_table: &RoutingTable, value: ValueRef<'_>, max_k: usize, + local_peer_info: Option<&PeerInfo>, ) -> StoreValue, Option>)> + Send> { let key_hash = match &value { ValueRef::Peer(value) => tl_proto::hash(&value.key), ValueRef::Overlay(value) => tl_proto::hash(&value.key), }; - let request_body = Bytes::from(tl_proto::serialize(rpc::StoreRef { value })); + let request_body = Bytes::from(match local_peer_info { + Some(peer_info) => { + tl_proto::serialize((rpc::WithPeerInfoRef { peer_info }, rpc::StoreRef { value })) + } + None => tl_proto::serialize(rpc::StoreRef { value }), + }); let semaphore = Arc::new(Semaphore::new(10)); let futures = futures_util::stream::FuturesUnordered::new(); diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs index abc0969b7..3a054011e 100644 --- a/network/src/dht/routing.rs +++ b/network/src/dht/routing.rs @@ -7,6 +7,12 @@ use tycho_util::time::now_sec; use crate::dht::{xor_distance, MAX_XOR_DISTANCE}; use crate::types::{PeerId, PeerInfo}; +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum RoutingTableSource { + Untrusted, + Trusted, +} + pub(crate) struct RoutingTable { pub local_id: PeerId, pub buckets: BTreeMap, @@ -30,7 +36,13 @@ impl RoutingTable { self.buckets.values().map(|bucket| bucket.nodes.len()).sum() } - pub fn add(&mut self, peer: Arc, max_k: usize, node_ttl: &Duration) -> bool { + pub fn add( + &mut self, + peer: Arc, + max_k: usize, + node_ttl: &Duration, + source: RoutingTableSource, + ) -> bool { let distance = xor_distance(&self.local_id, &peer.id); if distance == 0 { return false; @@ -39,7 +51,7 @@ impl RoutingTable { self.buckets .entry(distance) .or_insert_with(|| Bucket::with_capacity(max_k)) - .insert(peer, max_k, node_ttl) + .insert(peer, max_k, node_ttl, source) } pub fn closest(&self, key: &[u8; 32], count: usize) -> Vec> { @@ -108,12 +120,26 @@ impl Bucket { } } - fn insert(&mut self, node: Arc, max_k: usize, timeout: &Duration) -> bool { + fn insert( + &mut self, + node: Arc, + max_k: usize, + timeout: &Duration, + source: RoutingTableSource, + ) -> bool { if let Some(index) = self .nodes .iter_mut() .position(|item| item.data.id == node.id) { + if source == 
RoutingTableSource::Untrusted { + let slot = &mut self.nodes[index]; + // Do nothing if node info was not updated (by created_at field) + if node.created_at <= slot.data.created_at { + return false; + } + } + self.nodes.remove(index); } else if self.nodes.len() >= max_k { if matches!(self.nodes.front(), Some(node) if node.is_expired(now_sec(), timeout)) { @@ -131,7 +157,7 @@ impl Bucket { where F: FnMut(&Node) -> bool, { - self.nodes.retain(f) + self.nodes.retain(f); } pub fn is_empty(&self) -> bool { @@ -180,8 +206,18 @@ mod tests { let mut table = RoutingTable::new(PeerId::random()); let peer = PeerId::random(); - assert!(table.add(make_node(peer), MAX_K, &Duration::MAX)); - assert!(table.add(make_node(peer), MAX_K, &Duration::MAX)); // returns true because the node was updated + assert!(table.add( + make_node(peer), + MAX_K, + &Duration::MAX, + RoutingTableSource::Trusted + )); + assert!(table.add( + make_node(peer), + MAX_K, + &Duration::MAX, + RoutingTableSource::Trusted + )); // returns true because the node was updated assert_eq!(table.len(), 1); } @@ -190,7 +226,12 @@ mod tests { let local_id = PeerId::random(); let mut table = RoutingTable::new(local_id); - assert!(!table.add(make_node(local_id), MAX_K, &Duration::MAX)); + assert!(!table.add( + make_node(local_id), + MAX_K, + &Duration::MAX, + RoutingTableSource::Trusted + )); assert!(table.is_empty()); } @@ -201,9 +242,19 @@ mod tests { let mut bucket = Bucket::with_capacity(k); for _ in 0..k { - assert!(bucket.insert(make_node(PeerId::random()), k, &timeout)); + assert!(bucket.insert( + make_node(PeerId::random()), + k, + &timeout, + RoutingTableSource::Trusted + )); } - assert!(!bucket.insert(make_node(PeerId::random()), k, &timeout)); + assert!(!bucket.insert( + make_node(PeerId::random()), + k, + &timeout, + RoutingTableSource::Trusted + )); } #[test] @@ -322,7 +373,12 @@ mod tests { let mut table = RoutingTable::new(local_id); for id in ids { - table.add(make_node(id), MAX_K, &Duration::MAX); + table.add( + make_node(id), + MAX_K, + &Duration::MAX, + RoutingTableSource::Trusted, + ); } { diff --git a/network/src/proto.tl b/network/src/proto.tl index 1aa05ffcd..21be43d69 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -120,6 +120,13 @@ dht.nodeInfoFound info:dht.node = dht.NodeInfoResponse; ---functions--- +/** +* Query wrapper with an announced peer info. +* +* @param peer_info a signed info of the sender +*/ +dht.withPeerInfo peer_info:dht.node = True; + /** * Suggest a node to store that value * diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs index 1a41c26d2..53c9dfbff 100644 --- a/network/src/proto/dht.rs +++ b/network/src/proto/dht.rs @@ -374,6 +374,22 @@ pub struct NodeInfoResponse { pub mod rpc { use super::*; + /// Query wrapper with an announced peer info. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.withPeerInfo", scheme = "proto.tl")] + pub struct WithPeerInfo { + /// A signed info of the sender. + pub peer_info: Arc, + } + + /// Query wrapper with an announced peer info. + #[derive(Debug, Clone, TlWrite)] + #[tl(boxed, id = "dht.withPeerInfo", scheme = "proto.tl")] + pub struct WithPeerInfoRef<'tl> { + /// A signed info of the sender. + pub peer_info: &'tl PeerInfo, + } + /// Suggest a node to store that value. 
#[derive(Debug, Clone, TlRead, TlWrite)] #[tl(boxed, id = "dht.store", scheme = "proto.tl")] diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs index 76eb5eba8..c868a54aa 100644 --- a/network/src/util/mod.rs +++ b/network/src/util/mod.rs @@ -10,26 +10,33 @@ pub(crate) mod tl; #[macro_export] macro_rules! match_tl_request { - ($req_body:expr, { + ($req_body:expr, $(tag = $tag:expr,)? { $($ty:path as $pat:pat => $expr:expr),*$(,)? }, $err:pat => $err_exr:expr) => { '__match_req: { - let $err = if ($req_body).len() >= 4 { - match ($req_body).as_ref().get_u32_le() { - $( - <$ty>::TL_ID => match $crate::__internal::tl_proto::deserialize::<$ty>(&($req_body)) { - Ok($pat) => break '__match_req ($expr), - Err(e) => e, - } - )* - _ => $crate::__internal::tl_proto::TlError::UnknownConstructor, - } - } else { - $crate::__internal::tl_proto::TlError::UnexpectedEof - }; + let $err = $crate::match_tl_request!(@inner $req_body, $($tag)?, { + $( + <$ty>::TL_ID => match $crate::__internal::tl_proto::deserialize::<$ty>(&($req_body)) { + Ok($pat) => break '__match_req ($expr), + Err(e) => e, + } + )* + _ => $crate::__internal::tl_proto::TlError::UnknownConstructor, + }); $err_exr } }; + + (@inner $req_body:expr, $tag:expr, $($rest:tt)*) => { + match $tag $($rest)* + }; + (@inner $req_body:expr, , $($rest:tt)*) => { + if ($req_body).len() >= 4 { + match ($req_body).as_ref().get_u32_le() $($rest)* + } else { + $crate::__internal::tl_proto::TlError::UnexpectedEof + } + }; } pub fn check_peer_signature(peed_id: &PeerId, signature: &[u8; 64], data: &T) -> bool From 6e2b98e9f0c2a8e05b2e8f8144ae54b6304bef32 Mon Sep 17 00:00:00 2001 From: Ivan Kalinin Date: Mon, 19 Feb 2024 18:59:14 +0100 Subject: [PATCH 35/35] feat(network): add test for peer announcement --- network/tests/dht.rs | 48 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/network/tests/dht.rs b/network/tests/dht.rs index 5c583656b..cb80aa3da 100644 --- a/network/tests/dht.rs +++ b/network/tests/dht.rs @@ -138,3 +138,51 @@ async fn bootstrap_nodes_store_value() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn connect_new_node_to_bootstrap() -> Result<()> { + tracing_subscriber::fmt::try_init().ok(); + tracing::info!("connect_new_node_to_bootstrap"); + + #[derive(Debug, Clone, PartialEq, Eq, TlWrite, TlRead)] + struct SomeValue(u32); + + const VALUE: SomeValue = SomeValue(123123); + + let (bootstrap_nodes, global_config) = make_network(5); + + let node = Node::new(&ed25519::SecretKey::generate(&mut rand::thread_rng()))?; + for peer_info in &global_config { + node.dht.add_peer(peer_info.clone())?; + } + + // Ensure that the node is not known by the bootstrap nodes + let mut somebody_knows_the_peer = false; + for bootstrap_node in &bootstrap_nodes { + somebody_knows_the_peer |= bootstrap_node + .network + .known_peers() + .contains(node.network.peer_id()); + } + assert!(!somebody_knows_the_peer); + + // Store value and announce the peer info + node.dht + .entry(proto::dht::PeerValueKeyName::NodeInfo) + .with_data(VALUE) + .with_peer_info(true) + .store() + .await?; + + // The node must be known by some bootstrap nodes now + let mut somebody_knows_the_peer = false; + for bootstrap_node in &bootstrap_nodes { + somebody_knows_the_peer |= bootstrap_node + .network + .known_peers() + .contains(node.network.peer_id()); + } + assert!(somebody_knows_the_peer); + + Ok(()) +}
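
Usage sketch for the flow introduced by the last two patches: a node seeds its routing table with bootstrap peers, then stores a value while piggybacking its own signed peer info on the request. This is a minimal illustration mirroring the `connect_new_node_to_bootstrap` test, not code from the series itself; the crate import paths, the `MyValue` type, and the `store_with_announcement` helper are assumptions, while `add_peer`, `entry`, `with_data`, `with_peer_info`, and `store` are the builder API added above.

    use std::sync::Arc;

    use anyhow::Result;
    use tl_proto::{TlRead, TlWrite};
    // NOTE: import paths are assumed and may need adjusting to the crate's
    // actual module layout / re-exports.
    use tycho_network::proto::dht::PeerValueKeyName;
    use tycho_network::{DhtClient, PeerInfo};

    #[derive(Debug, Clone, PartialEq, Eq, TlWrite, TlRead)]
    struct MyValue(u32);

    async fn store_with_announcement(
        dht_client: &DhtClient,
        bootstrap_peers: &[Arc<PeerInfo>],
    ) -> Result<()> {
        // Seed the routing table with known peers; entries added through
        // `add_peer` are inserted as `RoutingTableSource::Trusted`.
        for peer_info in bootstrap_peers {
            dht_client.add_peer(peer_info.clone())?;
        }

        // `with_peer_info(true)` makes `store()` prefix the `dht.store`
        // message with `dht.withPeerInfo`, carrying this node's signed
        // `PeerInfo`, so receiving nodes can add the sender to their routing
        // tables (as an untrusted entry) even if it was previously unknown.
        dht_client
            .entry(PeerValueKeyName::NodeInfo)
            .with_data(MyValue(42))
            .with_peer_info(true)
            .store()
            .await
    }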