From e9ecebae7b2daad0840bd14cee3864f5780f20a7 Mon Sep 17 00:00:00 2001 From: idky137 Date: Wed, 31 Jul 2024 21:19:11 +0100 Subject: [PATCH 01/18] added queue::ingestor and bones of queue::worker --- Cargo.lock | 2 + zingo-proxyd/Cargo.toml | 2 + zingo-proxyd/src/nym_server.rs | 13 +- zingo-proxyd/src/proxy.rs | 20 ++- zingo-proxyd/src/server.rs | 79 +++++++++-- zingo-rpc/src/jsonrpc/connector.rs | 1 + zingo-rpc/src/nym/client.rs | 31 +++-- zingo-rpc/src/nym/error.rs | 14 +- zingo-rpc/src/primitives/client.rs | 4 - zingo-rpc/src/queue.rs | 2 + zingo-rpc/src/queue/error.rs | 24 +++- zingo-rpc/src/queue/ingestor.rs | 190 ++++++++++++++++++++++++++ zingo-rpc/src/queue/manager.rs | 0 zingo-rpc/src/queue/request.rs | 143 ++++++++----------- zingo-rpc/src/queue/worker.rs | 67 +++++++++ zingo-rpc/src/rpc/nymservice.rs | 6 +- zingo-rpc/src/rpc/nymwalletservice.rs | 19 ++- zingo-rpc/src/walletrpc/service.rs | 14 +- 18 files changed, 487 insertions(+), 144 deletions(-) create mode 100644 zingo-rpc/src/queue/ingestor.rs create mode 100644 zingo-rpc/src/queue/manager.rs create mode 100644 zingo-rpc/src/queue/worker.rs diff --git a/Cargo.lock b/Cargo.lock index 60a4fd5..82a2168 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7195,11 +7195,13 @@ version = "0.1.0" dependencies = [ "ctrlc", "http", + "hyper", "nym-bin-common", "nym-sdk", "nym-sphinx-anonymous-replies", "tokio", "tonic", + "tower", "zcash_client_backend", "zingo-netutils", "zingo-rpc", diff --git a/zingo-proxyd/Cargo.toml b/zingo-proxyd/Cargo.toml index 385250c..42636e5 100644 --- a/zingo-proxyd/Cargo.toml +++ b/zingo-proxyd/Cargo.toml @@ -41,4 +41,6 @@ tonic = { workspace = true } http = { workspace = true } # Miscellaneous Crate +tower = { version = "0.4.13" } +hyper = { version = "0.14.28", features = ["full"] } # { version = "1.4.1", features = ["full"] } ctrlc = "3.2.1" # "3.4.4" diff --git a/zingo-proxyd/src/nym_server.rs b/zingo-proxyd/src/nym_server.rs index 6b8c92c..b4cc3f5 100644 --- a/zingo-proxyd/src/nym_server.rs +++ b/zingo-proxyd/src/nym_server.rs @@ -13,8 +13,7 @@ use nym_sdk::mixnet::{MixnetMessageSender, ReconstructedMessage}; use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; use zingo_rpc::{ - primitives::client::{NymClient, ProxyClient}, - queue::request::ZingoProxyRequest, + nym::client::NymClient, primitives::client::ProxyClient, queue::request::ZingoProxyRequest, }; /// Wrapper struct for a Nym client. @@ -55,7 +54,7 @@ impl NymServer { }; while self.online.load(Ordering::SeqCst) { // --- wait for request. 
- while let Some(request_nym) = self.nym_client.0.wait_for_messages().await { + while let Some(request_nym) = self.nym_client.client.wait_for_messages().await { if request_nym.is_empty() { interval.tick().await; if !self.online.load(Ordering::SeqCst) { @@ -105,7 +104,7 @@ impl NymServer { // --- send response self.nym_client - .0 + .client .send_reply(return_recipient, response) .await .unwrap(); @@ -117,9 +116,9 @@ impl NymServer { } /// Returns a new NymServer Inatanse - pub async fn new(nym_conf_path: &str, online: Arc) -> Self { - let nym_client = NymClient::nym_spawn(nym_conf_path).await.unwrap(); - let nym_addr = nym_client.0.nym_address().to_string(); + pub async fn spawn(nym_conf_path: &str, online: Arc) -> Self { + let nym_client = NymClient::spawn(nym_conf_path).await.unwrap(); + let nym_addr = nym_client.client.nym_address().to_string(); NymServer { nym_client, nym_addr, diff --git a/zingo-proxyd/src/proxy.rs b/zingo-proxyd/src/proxy.rs index 2b07d37..3b8442e 100644 --- a/zingo-proxyd/src/proxy.rs +++ b/zingo-proxyd/src/proxy.rs @@ -13,6 +13,24 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tokio::task::JoinHandle; +// /// Holds configuration data for ZingoProxyD. +// pub struct ProxyConfig { +// proxy_port: u16, +// zebrad_port: u16, +// nym_conf_path: String, +// max_queue_size: usize, +// max_workers: usize, +// max_cache_mem: u16, +// } + +// pub struct Proxy { +// grpc_server: GrpcServer,* +// nym_server: NymServer,* +// state_engine,* +// queue_manager,* +// config: ProxyConfig, +// } + /// Launches test Zingo_Proxy server. pub async fn spawn_proxy( proxy_port: &u16, @@ -58,7 +76,7 @@ pub async fn spawn_proxy( // let nym_server: NymServer = NymServer(NymClient::nym_spawn(nym_conf_path).await); // nym_addr_out = Some(nym_server.0 .0.nym_address().to_string()); // let nym_proxy_handle = nym_server.serve(online).await; - let nym_server = NymServer::new(nym_conf_path, online).await; + let nym_server = NymServer::spawn(nym_conf_path, online).await; nym_addr_out = Some(nym_server.nym_addr.clone()); let nym_proxy_handle = nym_server.serve().await; diff --git a/zingo-proxyd/src/server.rs b/zingo-proxyd/src/server.rs index 17ed5ca..7233877 100644 --- a/zingo-proxyd/src/server.rs +++ b/zingo-proxyd/src/server.rs @@ -3,14 +3,26 @@ //! TODO: - Add GrpcServerError error type and rewrite functions to return >, propagating internal errors. //! - Add user and password as fields of ProxyClient and use here. +// use http::Uri; +// use std::{ +// net::{Ipv4Addr, SocketAddr}, +// sync::{ +// atomic::{AtomicBool, Ordering}, +// Arc, +// }, +// }; + use http::Uri; -use std::{ - net::{Ipv4Addr, SocketAddr}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, +use std::net::{Ipv4Addr, SocketAddr}; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, }; +use std::task::{Context, Poll}; +use tonic::codegen::{BoxFuture, StdError}; +use tonic::transport::NamedService; +use tower::Service; + use zingo_rpc::{jsonrpc::connector::test_node_and_return_uri, primitives::client::ProxyClient}; #[cfg(not(feature = "nym_poc"))] @@ -20,9 +32,9 @@ use zingo_rpc::proto::service::compact_tx_streamer_server::CompactTxStreamerServ use zcash_client_backend::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; /// Configuration data for gRPC server. -pub struct ProxyServer(pub ProxyClient); +pub struct GrpcServer(pub ProxyClient); -impl ProxyServer { +impl GrpcServer { /// Starts gRPC service. 
pub fn serve( self, @@ -31,11 +43,13 @@ impl ProxyServer { ) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { let svc = CompactTxStreamerServer::new(self.0); + let logging_svc = LoggingService::new(svc); + let sockaddr = SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), port.into()); println!("@zingoproxyd: gRPC server listening on: {sockaddr}"); let server = tonic::transport::Server::builder() - .add_service(svc.clone()) + .add_service(logging_svc.clone()) .serve(sockaddr); let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); @@ -68,7 +82,7 @@ impl ProxyServer { /// Creates configuration data for gRPC server. pub fn new(lightwalletd_uri: http::Uri, zebrad_uri: http::Uri) -> Self { - ProxyServer(ProxyClient { + GrpcServer(ProxyClient { lightwalletd_uri, zebrad_uri, online: Arc::new(AtomicBool::new(true)), @@ -99,6 +113,49 @@ pub async fn spawn_grpc_server( .await .unwrap(); - let server = ProxyServer::new(lwd_uri, zebra_uri); + let server = GrpcServer::new(lwd_uri, zebra_uri); server.serve(*proxy_port, online) } + +#[derive(Clone)] +struct LoggingService { + inner: T, +} + +impl LoggingService { + pub fn new(inner: T) -> Self { + Self { inner } + } +} + +impl Service> for LoggingService +where + T: Service, Response = http::Response> + Send + 'static, + B: Send + 'static + std::fmt::Debug, + T::Error: Into + Send + 'static, + T::Future: Send + 'static, +{ + type Response = T::Response; + type Error = T::Error; + type Future = BoxFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: http::Request) -> Self::Future { + println!("Received request: {:?}", req); + let fut = self.inner.call(req); + Box::pin(async move { + let res = fut.await?; + Ok(res) + }) + } +} + +impl NamedService for LoggingService +where + T: NamedService, +{ + const NAME: &'static str = T::NAME; +} diff --git a/zingo-rpc/src/jsonrpc/connector.rs b/zingo-rpc/src/jsonrpc/connector.rs index 8a9ce22..b440bec 100644 --- a/zingo-rpc/src/jsonrpc/connector.rs +++ b/zingo-rpc/src/jsonrpc/connector.rs @@ -42,6 +42,7 @@ struct RpcError { } /// JsonRPC Client config data. +#[derive(Debug)] pub struct JsonRpcConnector { uri: http::Uri, id_counter: AtomicI32, diff --git a/zingo-rpc/src/nym/client.rs b/zingo-rpc/src/nym/client.rs index 6a8e458..4dae03e 100644 --- a/zingo-rpc/src/nym/client.rs +++ b/zingo-rpc/src/nym/client.rs @@ -3,15 +3,24 @@ //! TODO: - Add NymClientError error type and rewrite functions to return >. use nym_sdk::mixnet::{ - MixnetClientBuilder, MixnetMessageSender, Recipient, ReconstructedMessage, StoragePaths, + MixnetClient, MixnetClientBuilder, MixnetMessageSender, Recipient, ReconstructedMessage, + StoragePaths, }; use std::path::PathBuf; -use crate::{nym::error::NymError, primitives::client::NymClient}; +use crate::nym::error::NymError; + +/// Wrapper struct for a Nym client. +pub struct NymClient { + /// Nym SDK Client. + pub client: MixnetClient, + /// Nym client address. + pub addr: String, +} impl NymClient { /// Spawns a nym client and connects to the mixnet. 
- pub async fn nym_spawn(str_path: &str) -> Result { + pub async fn spawn(str_path: &str) -> Result { //nym_bin_common::logging::setup_logging(); let client = MixnetClientBuilder::new_with_default_storage(StoragePaths::new_from_dir( PathBuf::from(str_path), @@ -21,16 +30,16 @@ impl NymClient { .connect_to_mixnet() .await?; - let nym_addr = client.nym_address().to_string(); - println!("@zingoproxyd[nym]: Nym server listening on: {nym_addr}."); + let addr = client.nym_address().to_string(); + println!("@zingoproxyd[nym]: Nym server listening on: {addr}."); - Ok(Self(client)) + Ok(Self { client, addr }) } /// Forwards an encoded gRPC request over the nym mixnet to the nym address specified and waits for the response. /// /// TODO: Add timout for waiting for response. - pub async fn nym_forward( + pub async fn send( &mut self, recipient_address: &str, message: Vec, @@ -38,10 +47,10 @@ impl NymClient { // Box> { let recipient: Recipient = Recipient::try_from_base58_string(recipient_address.to_string())?; - self.0.send_plain_message(recipient, message).await?; + self.client.send_plain_message(recipient, message).await?; let mut nym_response: Vec = Vec::new(); - while let Some(response_in) = self.0.wait_for_messages().await { + while let Some(response_in) = self.client.wait_for_messages().await { if response_in.is_empty() { continue; } @@ -58,7 +67,7 @@ impl NymClient { } /// Closes the nym client. - pub async fn nym_close(self) { - self.0.disconnect().await; + pub async fn close(self) { + self.client.disconnect().await; } } diff --git a/zingo-rpc/src/nym/error.rs b/zingo-rpc/src/nym/error.rs index a609530..a6fa214 100644 --- a/zingo-rpc/src/nym/error.rs +++ b/zingo-rpc/src/nym/error.rs @@ -14,11 +14,17 @@ pub enum NymError { #[error("Nym-SDK Error: {0}")] NymError(#[from] nym_sdk::Error), /// Nym address formatting errors. - #[error("Nym Recipient Formatting Error Error: {0}")] + #[error("Nym Recipient Formatting Error: {0}")] RecipientFormattingError(#[from] nym_sphinx_addressing::clients::RecipientFormattingError), /// Mixnet connection error. #[error("Connection Error: {0}")] ConnectionError(String), + /// Custom error for empty messages received from the Nym network. + #[error("Empty message received from the mixnet")] + EmptyMessageError, + /// Custom error for receiveing not AnonSenderTag (surb) from the Nym network. + #[error("No AnonSenderTag received from the mixnet")] + EmptyRecipientTagError, } impl From for tonic::Status { @@ -32,6 +38,12 @@ impl From for tonic::Status { NymError::ConnectionError(e) => { tonic::Status::internal(format!("Connection error: {}", e)) } + NymError::EmptyMessageError => { + tonic::Status::internal(format!("Empty message received from nym mixnet")) + } + NymError::EmptyRecipientTagError => { + tonic::Status::internal(format!("No AnonSenderTag received from nym mixnet")) + } } } } diff --git a/zingo-rpc/src/primitives/client.rs b/zingo-rpc/src/primitives/client.rs index 6ad8e7a..7e36b6a 100644 --- a/zingo-rpc/src/primitives/client.rs +++ b/zingo-rpc/src/primitives/client.rs @@ -1,6 +1,5 @@ //! Holds primitive structs for ZingoProxy network clients. -use nym_sdk::mixnet::MixnetClient; use std::sync::{atomic::AtomicBool, Arc}; /// Configuration data for gRPC server. @@ -13,6 +12,3 @@ pub struct ProxyClient { /// Represents the Online status of the gRPC server. pub online: Arc, } - -/// Wrapper struct for a Nym client. 
-pub struct NymClient(pub MixnetClient); diff --git a/zingo-rpc/src/queue.rs b/zingo-rpc/src/queue.rs index 6c82246..6619c51 100644 --- a/zingo-rpc/src/queue.rs +++ b/zingo-rpc/src/queue.rs @@ -1,4 +1,6 @@ //! Zingo-Proxy request queue. pub mod error; +pub mod ingestor; pub mod request; +pub mod worker; diff --git a/zingo-rpc/src/queue/error.rs b/zingo-rpc/src/queue/error.rs index 697eb62..b5505ac 100644 --- a/zingo-rpc/src/queue/error.rs +++ b/zingo-rpc/src/queue/error.rs @@ -1,6 +1,9 @@ //! Hold error types for the queue and related functionality. -use crate::nym::error::NymError; +use std::io; +use tokio::sync::mpsc::error::TrySendError; + +use crate::{nym::error::NymError, queue::request::ZingoProxyRequest}; /// Zingo-Proxy request errors. #[derive(Debug, thiserror::Error)] @@ -15,3 +18,22 @@ pub enum RequestError { #[error("Nym error: {0}")] NymError(#[from] NymError), } + +/// Zingo-Proxy ingestor errors. +#[derive(Debug, thiserror::Error)] +pub enum IngestorError { + /// Request based errors. + #[error("Request error: {0}")] + RequestError(#[from] RequestError), + /// Nym based errors. + #[error("Nym error: {0}")] + NymError(#[from] NymError), + /// Tcp listener based error. + #[error("Failed to accept TcpStream: {0}")] + ClientConnectionError(#[from] io::Error), + /// Error from failing to send new request to the queue. + #[error("Failed to send request to the queue: {0}")] + QueuePushError(#[from] TrySendError), +} + +// WorkerError. diff --git a/zingo-rpc/src/queue/ingestor.rs b/zingo-rpc/src/queue/ingestor.rs new file mode 100644 index 0000000..f2ad16d --- /dev/null +++ b/zingo-rpc/src/queue/ingestor.rs @@ -0,0 +1,190 @@ +//! Holds the ingestor (listener) implementations. + +use std::{ + net::SocketAddr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; +use tokio::{net::TcpListener, sync::mpsc}; + +use crate::{ + nym::{client::NymClient, error::NymError}, + queue::{error::IngestorError, request::ZingoProxyRequest}, +}; + +/// Status of the worker. +/// +/// TODO: Add duration to each variant. +#[derive(Debug, Clone)] +pub enum IngestorStatus { + /// On hold, due to blockcache / node error. + Inactive, + /// Processing requests from the queue. + Listening, +} + +/// Configuration data for gRPC server. +pub struct TcpIngestor { + /// Tcp Listener. + pub ingestor: TcpListener, + /// Used to send requests to the queue. + pub queue: mpsc::Sender, + /// Represents the Online status of the gRPC server. + pub online: Arc, + /// Current status of the ingestor. + pub status: IngestorStatus, +} + +impl TcpIngestor { + /// Creates a Tcp Ingestor. + pub async fn spawn( + listen_addr: SocketAddr, + queue: mpsc::Sender, + online: Arc, + ) -> Result { + let listener = TcpListener::bind(listen_addr).await?; + Ok(TcpIngestor { + ingestor: listener, + queue, + online, + status: IngestorStatus::Inactive, + }) + } + + /// Starts Tcp service. + pub fn serve(mut self) -> tokio::task::JoinHandle> { + tokio::task::spawn(async move { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + // TODO Check self.status and wait on server / node if on hold. + self.status = IngestorStatus::Listening; + loop { + tokio::select! 
{ + _ = interval.tick() => { + if !self.check_online() { + println!("Tcp ingestor shutting down."); + return Ok(()); + } + } + incoming = self.ingestor.accept() => { + match incoming { + Ok((stream, _)) => { + if !self.check_online() { + println!("Tcp ingestor shutting down."); + return Ok(()); + } + if let Err(e) = self.queue.send(ZingoProxyRequest::new_from_grpc(stream)).await { + // TODO:: Return queue full tonic status over tcpstream and close (that TcpStream..). + eprintln!("Failed to send connection: {}", e); + } + } + Err(e) => { + // TODO: Handle error here (count errors and restart ingestor / proxy or initiate shotdown?) + eprintln!("Failed to accept connection with client: {}", e); + if !self.check_online() { + println!("Tcp ingestor shutting down."); + return Ok(()); + } + continue; + } + } + } + } + } + }) + } + + fn check_online(&self) -> bool { + self.online.load(Ordering::SeqCst) + } +} + +/// Wrapper struct for a Nym client. +pub struct NymIngestor { + /// Nym Client + pub ingestor: NymClient, + /// Used to send requests to the queue. + pub queue: mpsc::Sender, + /// Represents the Online status of the gRPC server. + pub online: Arc, + /// Current status of the ingestor. + pub status: IngestorStatus, +} + +impl NymIngestor { + /// Creates a Nym Ingestor + pub async fn spawn( + nym_conf_path: &str, + queue: mpsc::Sender, + online: Arc, + ) -> Result { + let listener = NymClient::spawn(nym_conf_path).await?; + Ok(NymIngestor { + ingestor: listener, + queue, + online, + status: IngestorStatus::Inactive, + }) + } + + /// Starts Nym service. + pub async fn serve(mut self) -> tokio::task::JoinHandle> { + tokio::task::spawn(async move { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + // TODO Check self.status and wait on server / node if on hold. + self.status = IngestorStatus::Listening; + + loop { + tokio::select! { + _ = interval.tick() => { + if !self.check_online() { + println!("Nym ingestor shutting down."); + return Ok(()); + } + } + incoming = self.ingestor.client.wait_for_messages() => { + match incoming { + Some(request) => { + if !self.check_online() { + println!("Nym ingestor shutting down."); + return Ok(()); + } + // NOTE / TODO: POC server checked for empty emssages here (if request.is_empty()..). Could be required here. + // TODO: Handle EmptyMessageError here. + let request_vu8 = request + .first() + .map(|r| r.message.clone()) + .ok_or_else(|| IngestorError::NymError(NymError::EmptyMessageError))?; + // TODO: Handle EmptyRecipientTagError here. + let return_recipient = request[0] + .sender_tag + .ok_or_else(|| IngestorError::NymError(NymError::EmptyRecipientTagError))?; + // TODO: Handle RequestError here. + let zingo_proxy_request = + ZingoProxyRequest::new_from_nym(return_recipient, request_vu8.as_ref())?; + if let Err(e) = self.queue.send(zingo_proxy_request).await { + // TODO: Return queue full tonic status over nym mixnet. 
+ eprintln!("Failed to send connection: {}", e); + } + } + None => { + eprintln!("Failed to receive message from Nym network."); + if !self.online.load(Ordering::SeqCst) { + println!("Nym ingestor shutting down."); + return Ok(()); + } + } + } + } + } + } + }) + } + + fn check_online(&self) -> bool { + self.online.load(Ordering::SeqCst) + } +} diff --git a/zingo-rpc/src/queue/manager.rs b/zingo-rpc/src/queue/manager.rs new file mode 100644 index 0000000..e69de29 diff --git a/zingo-rpc/src/queue/request.rs b/zingo-rpc/src/queue/request.rs index 9a3a8e6..92f8f17 100644 --- a/zingo-rpc/src/queue/request.rs +++ b/zingo-rpc/src/queue/request.rs @@ -3,12 +3,12 @@ use std::time::SystemTime; use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; -use tonic::metadata::MetadataMap; +use tokio::net::TcpStream; use crate::{nym::utils::read_nym_request_data, queue::error::RequestError}; /// Requests queuing metadata. -#[derive(Debug)] +#[derive(Debug, Clone)] struct QueueData { // / Exclusive request id. // request_id: u64, // TODO: implement with request queue (implement exlusive request_id generator in queue object). @@ -43,68 +43,74 @@ impl QueueData { } } -/// Requests metadata either contains a return address for nym requests or a tonic MetaDataMap for gRPC requests. +/// Nym request data. #[derive(Debug, Clone)] -pub enum RequestMetaData { - /// Return address for Nym requests. - AnonSendrTag(AnonymousSenderTag), - /// Metadata for gRPC requests. - MetaDataMap(MetadataMap), +pub struct NymRequest { + id: u64, + method: String, + metadata: AnonymousSenderTag, + body: Vec, } -impl TryFrom for AnonymousSenderTag { - type Error = RequestError; +impl NymRequest { + /// Returns the client assigned id for this request, only used to construct response. + pub fn client_id(&self) -> u64 { + self.id + } - fn try_from(value: RequestMetaData) -> Result { - match value { - RequestMetaData::AnonSendrTag(tag) => Ok(tag), - _ => Err(RequestError::IncorrectVariant), - } + /// Returns the RPC being called by the request. + pub fn method(&self) -> String { + self.method.clone() } -} -impl TryFrom for MetadataMap { - type Error = RequestError; + /// Returns request metadata including sender data. + pub fn metadata(&self) -> AnonymousSenderTag { + self.metadata + } - fn try_from(value: RequestMetaData) -> Result { - match value { - RequestMetaData::MetaDataMap(map) => Ok(map), - _ => Err(RequestError::IncorrectVariant), - } + /// Returns the request body. + pub fn body(&self) -> Vec { + self.body.clone() } } -/// Nym request data. +/// TcpStream holing an incoming gRPC request. #[derive(Debug)] -struct NymRequest { - id: u64, - method: String, - metadata: RequestMetaData, - body: Vec, -} +pub struct TcpRequest(TcpStream); -/// Grpc request data. -/// TODO: Convert incoming gRPC calls to GrpcRequest before adding to queue (implement with request queue). -#[derive(Debug)] -struct GrpcRequest { - id: u64, - method: String, - metadata: RequestMetaData, - body: Vec, +impl TcpRequest { + /// Returns the underlying TcpStream help by the request + pub fn get_stream(self) -> TcpStream { + self.0 + } } /// Requests originating from the Nym server. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct NymServerRequest { queuedata: QueueData, request: NymRequest, } -/// Requests originating from the gRPC server. +impl NymServerRequest { + /// Returns the underlying request. + pub fn get_request(&self) -> NymRequest { + self.request.clone() + } +} + +/// Requests originating from the Tcp server. 
#[derive(Debug)] -pub struct GrpcServerRequest { +pub struct TcpServerRequest { queuedata: QueueData, - request: GrpcRequest, + request: TcpRequest, +} + +impl TcpServerRequest { + /// Returns the underlying request. + pub fn get_request(self) -> TcpRequest { + self.request + } } /// Zingo-Proxy request, used by request queue. @@ -113,7 +119,7 @@ pub enum ZingoProxyRequest { /// Requests originating from the Nym server. NymServerRequest(NymServerRequest), /// Requests originating from the gRPC server. - GrpcServerRequest(GrpcServerRequest), + TcpServerRequest(TcpServerRequest), } impl ZingoProxyRequest { @@ -125,7 +131,7 @@ impl ZingoProxyRequest { request: NymRequest { id, method, - metadata: RequestMetaData::AnonSendrTag(metadata), + metadata, body: body.to_vec(), }, })) @@ -134,15 +140,10 @@ impl ZingoProxyRequest { /// Creates a ZingoProxyRequest from a gRPC service call, recieved by the gRPC server. /// /// TODO: implement proper functionality along with queue. - pub fn new_from_grpc(metadata: MetadataMap, bytes: &[u8]) -> Self { - ZingoProxyRequest::GrpcServerRequest(GrpcServerRequest { + pub fn new_from_grpc(stream: TcpStream) -> Self { + ZingoProxyRequest::TcpServerRequest(TcpServerRequest { queuedata: QueueData::new(), - request: GrpcRequest { - id: 0, // TODO - method: "TODO".to_string(), // TODO - metadata: RequestMetaData::MetaDataMap(metadata), - body: bytes.to_vec(), - }, + request: TcpRequest(stream), }) } @@ -150,7 +151,7 @@ impl ZingoProxyRequest { pub fn increase_requeues(&mut self) { match self { ZingoProxyRequest::NymServerRequest(ref mut req) => req.queuedata.increase_requeues(), - ZingoProxyRequest::GrpcServerRequest(ref mut req) => req.queuedata.increase_requeues(), + ZingoProxyRequest::TcpServerRequest(ref mut req) => req.queuedata.increase_requeues(), } } @@ -158,7 +159,7 @@ impl ZingoProxyRequest { pub fn duration(&self) -> Result { match self { ZingoProxyRequest::NymServerRequest(ref req) => req.queuedata.duration(), - ZingoProxyRequest::GrpcServerRequest(ref req) => req.queuedata.duration(), + ZingoProxyRequest::TcpServerRequest(ref req) => req.queuedata.duration(), } } @@ -166,39 +167,7 @@ impl ZingoProxyRequest { pub fn requeues(&self) -> u32 { match self { ZingoProxyRequest::NymServerRequest(ref req) => req.queuedata.requeues(), - ZingoProxyRequest::GrpcServerRequest(ref req) => req.queuedata.requeues(), - } - } - - /// Returns the client assigned id for this request, only used to construct response. - pub fn client_id(&self) -> u64 { - match self { - ZingoProxyRequest::NymServerRequest(ref req) => req.request.id, - ZingoProxyRequest::GrpcServerRequest(ref req) => req.request.id, - } - } - - /// Returns the RPC being called by the request. - pub fn method(&self) -> String { - match self { - ZingoProxyRequest::NymServerRequest(ref req) => req.request.method.clone(), - ZingoProxyRequest::GrpcServerRequest(ref req) => req.request.method.clone(), - } - } - - /// Returns request metadata including sender data. - pub fn metadata(&self) -> RequestMetaData { - match self { - ZingoProxyRequest::NymServerRequest(ref req) => req.request.metadata.clone(), - ZingoProxyRequest::GrpcServerRequest(ref req) => req.request.metadata.clone(), - } - } - - /// Returns the number of times the request has been requeued. 
- pub fn body(&self) -> Vec { - match self { - ZingoProxyRequest::NymServerRequest(ref req) => req.request.body.clone(), - ZingoProxyRequest::GrpcServerRequest(ref req) => req.request.body.clone(), + ZingoProxyRequest::TcpServerRequest(ref req) => req.queuedata.requeues(), } } } diff --git a/zingo-rpc/src/queue/worker.rs b/zingo-rpc/src/queue/worker.rs new file mode 100644 index 0000000..1f2f512 --- /dev/null +++ b/zingo-rpc/src/queue/worker.rs @@ -0,0 +1,67 @@ +//! Holds the queue worker implementation. + +use tokio::sync::mpsc; + +use super::request::ZingoProxyRequest; + +/// Status of the worker. +/// +/// TODO: Add duration to each variant. +#[derive(Debug, Clone)] +pub enum WorkerStatus { + /// Running initial startup routine. + Spawning, + /// Processing requests from the queue. + Working, + /// Waiting for requests from the queue. + Standby, + /// Running shutdown routine. + Closing, +} + +/// A queue working is the entity that takes requests from the queue and processes them. +/// +/// TODO: - Add JsonRpcConnector to worker and use by RPC services. +/// - Currently a new JsonRpcConnector is spawned for every RPC serviced. +#[derive(Debug)] +pub struct Worker { + /// Worker ID. + worker_id: u16, + /// Workers current status. + status: WorkerStatus, + /// Used to pop requests from the queue + queue_receiver: mpsc::Receiver, + /// Used to requeue requests. + queue_sender: mpsc::Sender, + // /// Nym Client used to return responses for requests received over nym. + // nym_client: + // /// Tonic server used for processing requests received over http. + // grpc_client: +} + +impl Worker { + /// Creates a new queue worker. + pub async fn new() -> Self { + todo!() + } + + /// Starts queue workers service routine. + pub async fn serve(&self) { + todo!() + } + + /// Ends the worker. + pub async fn shutdown(self) { + todo!() + } + + /// Returns the worker's ID. + pub fn id(&self) -> u16 { + self.worker_id + } + + /// Returns the workers current status. 
+ pub fn status(&self) -> WorkerStatus { + self.status.clone() + } +} diff --git a/zingo-rpc/src/rpc/nymservice.rs b/zingo-rpc/src/rpc/nymservice.rs index b9b3f17..be892bd 100644 --- a/zingo-rpc/src/rpc/nymservice.rs +++ b/zingo-rpc/src/rpc/nymservice.rs @@ -17,8 +17,8 @@ impl ProxyClient { request: &ZingoProxyRequest, ) -> Result, tonic::Status> { match request { - ZingoProxyRequest::NymServerRequest(_) => match request.method().as_str() { - "GetLightdInfo" => match prost::Message::decode(&request.body()[..]) { + ZingoProxyRequest::NymServerRequest(request) => match request.get_request().method().as_str() { + "GetLightdInfo" => match prost::Message::decode(&request.get_request().body()[..]) { Ok(input) => { let tonic_request = tonic::Request::new(input); let tonic_response = self.get_lightd_info(tonic_request) @@ -38,7 +38,7 @@ impl ProxyClient { e ))), }, - "SendTransaction" => match prost::Message::decode(&request.body()[..]) { + "SendTransaction" => match prost::Message::decode(&request.get_request().body()[..]) { Ok(input) => { let tonic_request = tonic::Request::new(input); let tonic_response = self.send_transaction(tonic_request) diff --git a/zingo-rpc/src/rpc/nymwalletservice.rs b/zingo-rpc/src/rpc/nymwalletservice.rs index c1c7c97..bc9b80f 100644 --- a/zingo-rpc/src/rpc/nymwalletservice.rs +++ b/zingo-rpc/src/rpc/nymwalletservice.rs @@ -6,7 +6,8 @@ use std::env; use tonic::{async_trait, Request, Response, Status}; use crate::{ - primitives::client::{NymClient, ProxyClient}, + nym::client::NymClient, + primitives::client::ProxyClient, walletrpc::utils::{deserialize_response, serialize_request, write_nym_request_data}, }; use zcash_client_backend::proto::{ @@ -119,11 +120,9 @@ impl CompactTxStreamer for ProxyClient { let args: Vec = env::args().collect(); let recipient_address: String = args[1].clone(); let nym_conf_path = "/tmp/nym_client"; - let mut client = NymClient::nym_spawn(nym_conf_path).await?; - let response_data = client - .nym_forward(recipient_address.as_str(), nym_request) - .await?; - client.nym_close().await; + let mut client = NymClient::spawn(nym_conf_path).await?; + let response_data = client.send(recipient_address.as_str(), nym_request).await?; + client.close().await; // -- deserialize SendResponse let response: SendResponse = match deserialize_response(response_data.as_slice()).await { Ok(res) => res, @@ -257,11 +256,9 @@ impl CompactTxStreamer for ProxyClient { let args: Vec = env::args().collect(); let recipient_address: String = args[1].clone(); let nym_conf_path = "/tmp/nym_client"; - let mut client = NymClient::nym_spawn(nym_conf_path).await?; - let response_data = client - .nym_forward(recipient_address.as_str(), nym_request) - .await?; - client.nym_close().await; + let mut client = NymClient::spawn(nym_conf_path).await?; + let response_data = client.send(recipient_address.as_str(), nym_request).await?; + client.close().await; // -- deserialize LightdInfo let response: LightdInfo = match deserialize_response(response_data.as_slice()).await { Ok(res) => res, diff --git a/zingo-rpc/src/walletrpc/service.rs b/zingo-rpc/src/walletrpc/service.rs index d012809..f5ba8da 100644 --- a/zingo-rpc/src/walletrpc/service.rs +++ b/zingo-rpc/src/walletrpc/service.rs @@ -10,7 +10,7 @@ use tonic::{self, codec::CompressionEncoding, Status}; use tonic::{service::interceptor::InterceptedService, transport::Endpoint}; use crate::{ - primitives::client::NymClient, + nym::client::NymClient, proto::{ compact_formats::{CompactBlock, CompactTx}, service::{ @@ -238,9 +238,9 @@ where 
} }; let nym_conf_path = "/tmp/nym_client"; - let mut client = NymClient::nym_spawn(nym_conf_path).await?; - let response_data = client.nym_forward(addr, nym_request).await?; - client.nym_close().await; + let mut client = NymClient::spawn(nym_conf_path).await?; + let response_data = client.send(addr, nym_request).await?; + client.close().await; let response: SendResponse = match deserialize_response(response_data.as_slice()).await { Ok(res) => res, @@ -426,9 +426,9 @@ where } }; let nym_conf_path = "/tmp/nym_client"; - let mut client = NymClient::nym_spawn(nym_conf_path).await?; - let response_data = client.nym_forward(addr, nym_request).await?; - client.nym_close().await; + let mut client = NymClient::spawn(nym_conf_path).await?; + let response_data = client.send(addr, nym_request).await?; + client.close().await; let response: LightdInfo = match deserialize_response(response_data.as_slice()).await { Ok(res) => res, From ee63c3949ed8fe61566de78f0fb4cd8b1c711794 Mon Sep 17 00:00:00 2001 From: idky137 Date: Thu, 1 Aug 2024 16:48:02 +0100 Subject: [PATCH 02/18] started adding worker --- zingo-proxyd/src/nym_server.rs | 6 +- zingo-proxyd/src/server.rs | 6 +- zingo-rpc/src/primitives/client.rs | 13 -- zingo-rpc/src/queue.rs | 2 +- zingo-rpc/src/queue/error.rs | 4 +- zingo-rpc/src/queue/ingestor.rs | 16 +-- zingo-rpc/src/queue/worker.rs | 175 ++++++++++++++++++++++---- zingo-rpc/src/rpc.rs | 14 +++ zingo-rpc/src/rpc/nymservice.rs | 4 +- zingo-rpc/src/rpc/nymwalletservice.rs | 4 +- zingo-rpc/src/rpc/service.rs | 4 +- 11 files changed, 188 insertions(+), 60 deletions(-) diff --git a/zingo-proxyd/src/nym_server.rs b/zingo-proxyd/src/nym_server.rs index b4cc3f5..d1f9846 100644 --- a/zingo-proxyd/src/nym_server.rs +++ b/zingo-proxyd/src/nym_server.rs @@ -12,9 +12,7 @@ use std::sync::{ use nym_sdk::mixnet::{MixnetMessageSender, ReconstructedMessage}; use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; -use zingo_rpc::{ - nym::client::NymClient, primitives::client::ProxyClient, queue::request::ZingoProxyRequest, -}; +use zingo_rpc::{nym::client::NymClient, queue::request::ZingoProxyRequest, rpc::GrpcClient}; /// Wrapper struct for a Nym client. pub struct NymServer { @@ -37,7 +35,7 @@ impl NymServer { // NOTE: the following should be removed with the addition of the queue and worker pool. let lwd_port = 8080; let zebrad_port = 18232; - let proxy_client = ProxyClient { + let proxy_client = GrpcClient { lightwalletd_uri: http::Uri::builder() .scheme("http") .authority(format!("localhost:{lwd_port}")) diff --git a/zingo-proxyd/src/server.rs b/zingo-proxyd/src/server.rs index 7233877..46b34f7 100644 --- a/zingo-proxyd/src/server.rs +++ b/zingo-proxyd/src/server.rs @@ -23,7 +23,7 @@ use tonic::codegen::{BoxFuture, StdError}; use tonic::transport::NamedService; use tower::Service; -use zingo_rpc::{jsonrpc::connector::test_node_and_return_uri, primitives::client::ProxyClient}; +use zingo_rpc::{jsonrpc::connector::test_node_and_return_uri, rpc::GrpcClient}; #[cfg(not(feature = "nym_poc"))] use zingo_rpc::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; @@ -32,7 +32,7 @@ use zingo_rpc::proto::service::compact_tx_streamer_server::CompactTxStreamerServ use zcash_client_backend::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; /// Configuration data for gRPC server. -pub struct GrpcServer(pub ProxyClient); +pub struct GrpcServer(pub GrpcClient); impl GrpcServer { /// Starts gRPC service. 
@@ -82,7 +82,7 @@ impl GrpcServer { /// Creates configuration data for gRPC server. pub fn new(lightwalletd_uri: http::Uri, zebrad_uri: http::Uri) -> Self { - GrpcServer(ProxyClient { + GrpcServer(GrpcClient { lightwalletd_uri, zebrad_uri, online: Arc::new(AtomicBool::new(true)), diff --git a/zingo-rpc/src/primitives/client.rs b/zingo-rpc/src/primitives/client.rs index 7e36b6a..8b13789 100644 --- a/zingo-rpc/src/primitives/client.rs +++ b/zingo-rpc/src/primitives/client.rs @@ -1,14 +1 @@ -//! Holds primitive structs for ZingoProxy network clients. -use std::sync::{atomic::AtomicBool, Arc}; - -/// Configuration data for gRPC server. -pub struct ProxyClient { - /// Lightwalletd uri. - /// Used by grpc_passthrough to pass on unimplemented RPCs. - pub lightwalletd_uri: http::Uri, - /// Zebrad uri. - pub zebrad_uri: http::Uri, - /// Represents the Online status of the gRPC server. - pub online: Arc, -} diff --git a/zingo-rpc/src/queue.rs b/zingo-rpc/src/queue.rs index 6619c51..b9b18bf 100644 --- a/zingo-rpc/src/queue.rs +++ b/zingo-rpc/src/queue.rs @@ -1,4 +1,4 @@ -//! Zingo-Proxy request queue. +//! Zingo-Proxy client server. pub mod error; pub mod ingestor; diff --git a/zingo-rpc/src/queue/error.rs b/zingo-rpc/src/queue/error.rs index b5505ac..fdd8668 100644 --- a/zingo-rpc/src/queue/error.rs +++ b/zingo-rpc/src/queue/error.rs @@ -36,4 +36,6 @@ pub enum IngestorError { QueuePushError(#[from] TrySendError), } -// WorkerError. +/// Zingo-Proxy worker errors. +#[derive(Debug, thiserror::Error)] +pub enum WorkerError {} diff --git a/zingo-rpc/src/queue/ingestor.rs b/zingo-rpc/src/queue/ingestor.rs index f2ad16d..13f7ff0 100644 --- a/zingo-rpc/src/queue/ingestor.rs +++ b/zingo-rpc/src/queue/ingestor.rs @@ -28,13 +28,13 @@ pub enum IngestorStatus { /// Configuration data for gRPC server. pub struct TcpIngestor { /// Tcp Listener. - pub ingestor: TcpListener, + ingestor: TcpListener, /// Used to send requests to the queue. - pub queue: mpsc::Sender, + queue: mpsc::Sender, /// Represents the Online status of the gRPC server. - pub online: Arc, + online: Arc, /// Current status of the ingestor. - pub status: IngestorStatus, + status: IngestorStatus, } impl TcpIngestor { @@ -104,13 +104,13 @@ impl TcpIngestor { /// Wrapper struct for a Nym client. pub struct NymIngestor { /// Nym Client - pub ingestor: NymClient, + ingestor: NymClient, /// Used to send requests to the queue. - pub queue: mpsc::Sender, + queue: mpsc::Sender, /// Represents the Online status of the gRPC server. - pub online: Arc, + online: Arc, /// Current status of the ingestor. - pub status: IngestorStatus, + status: IngestorStatus, } impl NymIngestor { diff --git a/zingo-rpc/src/queue/worker.rs b/zingo-rpc/src/queue/worker.rs index 1f2f512..92fbcab 100644 --- a/zingo-rpc/src/queue/worker.rs +++ b/zingo-rpc/src/queue/worker.rs @@ -1,14 +1,27 @@ //! Holds the queue worker implementation. -use tokio::sync::mpsc; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; -use super::request::ZingoProxyRequest; +use http::Uri; +use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; +use tokio::{ + sync::mpsc, + time::{Duration, Instant}, +}; +use tonic::transport::Server; + +use crate::{ + proto::service::compact_tx_streamer_server::CompactTxStreamerServer, + queue::{error::WorkerError, request::ZingoProxyRequest}, + rpc::GrpcClient, +}; /// Status of the worker. -/// -/// TODO: Add duration to each variant. 
-#[derive(Debug, Clone)] -pub enum WorkerStatus { +#[derive(Debug, Clone, Copy)] +pub enum StatusType { /// Running initial startup routine. Spawning, /// Processing requests from the queue. @@ -19,35 +32,144 @@ pub enum WorkerStatus { Closing, } +/// Wrapper for StatusType that also holds initiation time, used for standby monitoring. +#[derive(Debug, Clone)] +pub enum WorkerStatus { + /// Running initial startup routine. + Spawning(Instant), + /// Processing requests from the queue. + Working(Instant), + /// Waiting for requests from the queue. + Standby(Instant), + /// Running shutdown routine. + Closing(Instant), +} + +impl WorkerStatus { + /// Create a new status with the current timestamp. + pub fn new(status: StatusType) -> WorkerStatus { + match status { + StatusType::Spawning => WorkerStatus::Spawning(Instant::now()), + StatusType::Working => WorkerStatus::Working(Instant::now()), + StatusType::Standby => WorkerStatus::Standby(Instant::now()), + StatusType::Closing => WorkerStatus::Closing(Instant::now()), + } + } + + /// Return the current status type and the duration the worker has been in this status. + pub fn status(&self) -> (StatusType, Duration) { + match self { + WorkerStatus::Spawning(timestamp) => (StatusType::Spawning, timestamp.elapsed()), + WorkerStatus::Working(timestamp) => (StatusType::Working, timestamp.elapsed()), + WorkerStatus::Standby(timestamp) => (StatusType::Standby, timestamp.elapsed()), + WorkerStatus::Closing(timestamp) => (StatusType::Closing, timestamp.elapsed()), + } + } + + /// Update the status to a new one, resetting the timestamp. + pub fn set(&mut self, new_status: StatusType) { + *self = match new_status { + StatusType::Spawning => WorkerStatus::Spawning(Instant::now()), + StatusType::Working => WorkerStatus::Working(Instant::now()), + StatusType::Standby => WorkerStatus::Standby(Instant::now()), + StatusType::Closing => WorkerStatus::Closing(Instant::now()), + } + } +} + /// A queue working is the entity that takes requests from the queue and processes them. /// -/// TODO: - Add JsonRpcConnector to worker and use by RPC services. -/// - Currently a new JsonRpcConnector is spawned for every RPC serviced. +/// TODO: - Add JsonRpcConnector to worker and pass to underlying RPC services. +/// - Currently a new JsonRpcConnector is spawned for every new RPC serviced. #[derive(Debug)] pub struct Worker { /// Worker ID. - worker_id: u16, - /// Workers current status. - status: WorkerStatus, + worker_id: usize, /// Used to pop requests from the queue - queue_receiver: mpsc::Receiver, + queue: mpsc::Receiver, /// Used to requeue requests. - queue_sender: mpsc::Sender, - // /// Nym Client used to return responses for requests received over nym. - // nym_client: - // /// Tonic server used for processing requests received over http. - // grpc_client: + requeue: mpsc::Sender, + /// Used to send responses to the nym_responder. + nym_responder: mpsc::Sender<(Vec, AnonymousSenderTag)>, + /// gRPC client used for processing requests received over http. + grpc_client: GrpcClient, + /// Workers current status. + status: WorkerStatus, + /// Represents the Online status of the gRPC server. + pub online: Arc, } impl Worker { /// Creates a new queue worker. 
- pub async fn new() -> Self { - todo!() + pub async fn spawn( + worker_id: usize, + queue: mpsc::Receiver, + requeue: mpsc::Sender, + nym_responder: mpsc::Sender<(Vec, AnonymousSenderTag)>, + lightwalletd_uri: Uri, + zebrad_uri: Uri, + online: Arc, + ) -> Result { + let grpc_client = GrpcClient { + lightwalletd_uri, + zebrad_uri, + online: online.clone(), + }; + Ok(Worker { + worker_id, + queue, + requeue, + nym_responder, + grpc_client, + status: WorkerStatus::new(StatusType::Spawning), + online, + }) } /// Starts queue workers service routine. - pub async fn serve(&self) { - todo!() + pub async fn serve(mut self) -> tokio::task::JoinHandle> { + tokio::task::spawn(async move { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + let svc = CompactTxStreamerServer::new(self.grpc_client.clone()); + let mut grpc_server = Server::builder().add_service(svc.clone()); + loop { + tokio::select! { + _ = interval.tick() => { + if !self.check_online() { + println!("Worker shutting down."); + return Ok(()); + } + } + incoming = self.queue.recv() => { + if !self.check_online() { + println!("worker shutting down."); + return Ok(()); + } + match incoming { + Some(ZingoProxyRequest::TcpServerRequest(req)) => { + let stream = req.get_request().get_stream(); + let incoming = async_stream::stream! { + yield Ok::<_, std::io::Error>(stream); + }; + grpc_server + .serve_with_incoming(incoming) + .await?; + } + Some(ZingoProxyRequest::NymServerRequest(req)) => { + // Handle NymServerRequest, for example: + // self.process_nym_request(req).await?; + // Or other logic specific to your application + } + None => { + println!("Queue is closed, worker shutting down."); + return Ok(()); + } + } + } + } + } + }) } /// Ends the worker. @@ -56,12 +178,17 @@ impl Worker { } /// Returns the worker's ID. - pub fn id(&self) -> u16 { + pub fn id(&self) -> usize { self.worker_id } /// Returns the workers current status. - pub fn status(&self) -> WorkerStatus { - self.status.clone() + pub fn status(&self) -> (StatusType, Duration) { + self.status.status() + } + + /// Check the online status on the server. + fn check_online(&self) -> bool { + self.online.load(Ordering::SeqCst) } } diff --git a/zingo-rpc/src/rpc.rs b/zingo-rpc/src/rpc.rs index 0e7f004..8f24463 100644 --- a/zingo-rpc/src/rpc.rs +++ b/zingo-rpc/src/rpc.rs @@ -1,5 +1,7 @@ //! Lightwallet RPC implementations. +use std::sync::{atomic::AtomicBool, Arc}; + #[cfg(not(feature = "nym_poc"))] pub mod service; @@ -7,3 +9,15 @@ pub mod service; pub mod nymwalletservice; pub mod nymservice; + +#[derive(Debug, Clone)] +/// Configuration data for gRPC server. +pub struct GrpcClient { + /// Lightwalletd uri. + /// Used by grpc_passthrough to pass on unimplemented RPCs. + pub lightwalletd_uri: http::Uri, + /// Zebrad uri. + pub zebrad_uri: http::Uri, + /// Represents the Online status of the gRPC server. 
+ pub online: Arc, +} diff --git a/zingo-rpc/src/rpc/nymservice.rs b/zingo-rpc/src/rpc/nymservice.rs index be892bd..5dbcb16 100644 --- a/zingo-rpc/src/rpc/nymservice.rs +++ b/zingo-rpc/src/rpc/nymservice.rs @@ -2,7 +2,7 @@ use prost::Message; -use crate::{primitives::client::ProxyClient, queue::request::ZingoProxyRequest}; +use crate::{queue::request::ZingoProxyRequest, rpc::GrpcClient}; #[cfg(not(feature = "nym_poc"))] use crate::proto::service::compact_tx_streamer_server::CompactTxStreamer; @@ -10,7 +10,7 @@ use crate::proto::service::compact_tx_streamer_server::CompactTxStreamer; #[cfg(feature = "nym_poc")] use zcash_client_backend::proto::service::compact_tx_streamer_server::CompactTxStreamer; -impl ProxyClient { +impl GrpcClient { /// Processes gRPC requests coming from the nym server. pub async fn process_nym_request( &self, diff --git a/zingo-rpc/src/rpc/nymwalletservice.rs b/zingo-rpc/src/rpc/nymwalletservice.rs index bc9b80f..a8cb53a 100644 --- a/zingo-rpc/src/rpc/nymwalletservice.rs +++ b/zingo-rpc/src/rpc/nymwalletservice.rs @@ -7,7 +7,7 @@ use tonic::{async_trait, Request, Response, Status}; use crate::{ nym::client::NymClient, - primitives::client::ProxyClient, + rpc::GrpcClient, walletrpc::utils::{deserialize_response, serialize_request, write_nym_request_data}, }; use zcash_client_backend::proto::{ @@ -55,7 +55,7 @@ macro_rules! define_grpc_passthrough { } #[async_trait] -impl CompactTxStreamer for ProxyClient { +impl CompactTxStreamer for GrpcClient { define_grpc_passthrough!( fn get_latest_block( &self, diff --git a/zingo-rpc/src/rpc/service.rs b/zingo-rpc/src/rpc/service.rs index aa85627..a26e81d 100644 --- a/zingo-rpc/src/rpc/service.rs +++ b/zingo-rpc/src/rpc/service.rs @@ -9,7 +9,6 @@ use crate::{ jsonrpc::{connector::JsonRpcConnector, primitives::GetTransactionResponse}, primitives::{ chain::{ConsensusBranchId, ConsensusBranchIdHex}, - client::ProxyClient, height::ChainHeight, }, proto::{ @@ -22,6 +21,7 @@ use crate::{ TreeState, TxFilter, }, }, + rpc::GrpcClient, utils::get_build_info, }; @@ -87,7 +87,7 @@ impl futures::Stream for CompactBlockStream { } } -impl CompactTxStreamer for ProxyClient { +impl CompactTxStreamer for GrpcClient { /// Return the height of the tip of the best chain. fn get_latest_block<'life0, 'async_trait>( &'life0 self, From ae0a3b246b6816b7f9384ee5cb89857f7c3f311f Mon Sep 17 00:00:00 2001 From: idky137 Date: Fri, 2 Aug 2024 13:39:37 +0100 Subject: [PATCH 03/18] added basic worker --- Cargo.lock | 1 + zingo-proxyd/src/nym_server.rs | 13 ++++--- zingo-rpc/Cargo.toml | 1 + zingo-rpc/src/queue/error.rs | 6 +++- zingo-rpc/src/queue/worker.rs | 64 +++++++++++++++++++++++---------- zingo-rpc/src/rpc/nymservice.rs | 11 +++--- 6 files changed, 65 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82a2168..f84beea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7211,6 +7211,7 @@ dependencies = [ name = "zingo-rpc" version = "0.1.0" dependencies = [ + "async-stream", "base64 0.13.1", "byteorder", "bytes 1.6.0", diff --git a/zingo-proxyd/src/nym_server.rs b/zingo-proxyd/src/nym_server.rs index d1f9846..c3cbc08 100644 --- a/zingo-proxyd/src/nym_server.rs +++ b/zingo-proxyd/src/nym_server.rs @@ -89,10 +89,15 @@ impl NymServer { // --- process request // NOTE: when the queue is added requests will not be processed here but by the queue! 
- let response = proxy_client - .process_nym_request(&zingo_proxy_request) - .await - .unwrap(); + let response: Vec; + match zingo_proxy_request { + ZingoProxyRequest::NymServerRequest(request) => { + response = proxy_client.process_nym_request(&request).await.unwrap(); + } + _ => { + todo!() + } + } // print response for testing // println!( diff --git a/zingo-rpc/Cargo.toml b/zingo-rpc/Cargo.toml index be83311..1b80a49 100644 --- a/zingo-rpc/Cargo.toml +++ b/zingo-rpc/Cargo.toml @@ -47,6 +47,7 @@ tokio-stream = "0.1" # "0.1.15" futures = "0.3.30" byteorder = "1" # "1.5" sha2 = "0.10" # "0.10.8" +async-stream = "0.3" # "0.3.5" [build-dependencies] tonic-build = { version = "0.10", features = ["prost"] } # "0.12" diff --git a/zingo-rpc/src/queue/error.rs b/zingo-rpc/src/queue/error.rs index fdd8668..fe1b870 100644 --- a/zingo-rpc/src/queue/error.rs +++ b/zingo-rpc/src/queue/error.rs @@ -38,4 +38,8 @@ pub enum IngestorError { /// Zingo-Proxy worker errors. #[derive(Debug, thiserror::Error)] -pub enum WorkerError {} +pub enum WorkerError { + /// Tonic transport error. + #[error("Tonic transport error: {0}")] + TonicTransportError(#[from] tonic::transport::Error), +} diff --git a/zingo-rpc/src/queue/worker.rs b/zingo-rpc/src/queue/worker.rs index 92fbcab..f1ed2de 100644 --- a/zingo-rpc/src/queue/worker.rs +++ b/zingo-rpc/src/queue/worker.rs @@ -14,11 +14,16 @@ use tokio::{ use tonic::transport::Server; use crate::{ - proto::service::compact_tx_streamer_server::CompactTxStreamerServer, queue::{error::WorkerError, request::ZingoProxyRequest}, rpc::GrpcClient, }; +#[cfg(not(feature = "nym_poc"))] +use crate::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; + +#[cfg(feature = "nym_poc")] +use zcash_client_backend::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; + /// Status of the worker. #[derive(Debug, Clone, Copy)] pub enum StatusType { @@ -88,7 +93,7 @@ pub struct Worker { /// Used to pop requests from the queue queue: mpsc::Receiver, /// Used to requeue requests. - requeue: mpsc::Sender, + _requeue: mpsc::Sender, /// Used to send responses to the nym_responder. nym_responder: mpsc::Sender<(Vec, AnonymousSenderTag)>, /// gRPC client used for processing requests received over http. @@ -104,7 +109,7 @@ impl Worker { pub async fn spawn( worker_id: usize, queue: mpsc::Receiver, - requeue: mpsc::Sender, + _requeue: mpsc::Sender, nym_responder: mpsc::Sender<(Vec, AnonymousSenderTag)>, lightwalletd_uri: Uri, zebrad_uri: Uri, @@ -118,7 +123,7 @@ impl Worker { Ok(Worker { worker_id, queue, - requeue, + _requeue, nym_responder, grpc_client, status: WorkerStatus::new(StatusType::Spawning), @@ -126,46 +131,67 @@ impl Worker { }) } - /// Starts queue workers service routine. + /// Starts queue worker service routine. + /// + /// TODO: Add requeue on error. pub async fn serve(mut self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); let svc = CompactTxStreamerServer::new(self.grpc_client.clone()); - let mut grpc_server = Server::builder().add_service(svc.clone()); + // TODO: create tonic server here for use within loop. + self.status.set(StatusType::Standby); loop { tokio::select! 
{ _ = interval.tick() => { if !self.check_online() { + self.status.set(StatusType::Closing); println!("Worker shutting down."); return Ok(()); } } incoming = self.queue.recv() => { if !self.check_online() { + self.status.set(StatusType::Closing); println!("worker shutting down."); return Ok(()); } + self.status.set(StatusType::Working); match incoming { - Some(ZingoProxyRequest::TcpServerRequest(req)) => { - let stream = req.get_request().get_stream(); - let incoming = async_stream::stream! { - yield Ok::<_, std::io::Error>(stream); - }; - grpc_server - .serve_with_incoming(incoming) - .await?; + Some(ZingoProxyRequest::TcpServerRequest(request)) => { + Server::builder().add_service(svc.clone()) + .serve_with_incoming( async_stream::stream! { + yield Ok::<_, std::io::Error>( + request.get_request().get_stream() + ); + }) + .await?; } - Some(ZingoProxyRequest::NymServerRequest(req)) => { - // Handle NymServerRequest, for example: - // self.process_nym_request(req).await?; - // Or other logic specific to your application + Some(ZingoProxyRequest::NymServerRequest(request)) => { + match self.grpc_client + .process_nym_request(&request) + .await { + Ok(response) => { + if let Err(e) = self.nym_responder.send((response, request.get_request().metadata())).await { + // TODO:: Handle this error! + eprintln!("Failed to send response to nym responder: {}", e); + } + } + Err(e) => { + // TODO:: Handle this error! + eprintln!("Failed to process nym received request: {}", e); + + } + + } } None => { - println!("Queue is closed, worker shutting down."); + self.status.set(StatusType::Closing); + println!("Queue closed, worker shutting down."); return Ok(()); } } + self.status.set(StatusType::Standby); } } } diff --git a/zingo-rpc/src/rpc/nymservice.rs b/zingo-rpc/src/rpc/nymservice.rs index 5dbcb16..d04bfd4 100644 --- a/zingo-rpc/src/rpc/nymservice.rs +++ b/zingo-rpc/src/rpc/nymservice.rs @@ -2,7 +2,7 @@ use prost::Message; -use crate::{queue::request::ZingoProxyRequest, rpc::GrpcClient}; +use crate::{queue::request::NymServerRequest, rpc::GrpcClient}; #[cfg(not(feature = "nym_poc"))] use crate::proto::service::compact_tx_streamer_server::CompactTxStreamer; @@ -14,10 +14,9 @@ impl GrpcClient { /// Processes gRPC requests coming from the nym server. pub async fn process_nym_request( &self, - request: &ZingoProxyRequest, + request: &NymServerRequest, ) -> Result, tonic::Status> { - match request { - ZingoProxyRequest::NymServerRequest(request) => match request.get_request().method().as_str() { + match request.get_request().method().as_str() { "GetLightdInfo" => match prost::Message::decode(&request.get_request().body()[..]) { Ok(input) => { let tonic_request = tonic::Request::new(input); @@ -78,8 +77,6 @@ impl GrpcClient { Err(tonic::Status::unimplemented("RPC not yet implemented over nym. 
If you require this RPC please open an issue or PR at the Zingo-Proxy github (https://github.com/zingolabs/zingo-proxy).")) }, _ => Err(tonic::Status::invalid_argument("Incorrect Method String")), - }, - _ => Err(tonic::Status::invalid_argument("Incorrect Request Type")), - } + } } } From 3df3b8a20cc8e97c858e9758b13b7544c3584fb0 Mon Sep 17 00:00:00 2001 From: idky137 Date: Fri, 2 Aug 2024 14:04:22 +0100 Subject: [PATCH 04/18] tidying --- zingo-proxyd/src/nym_server.rs | 2 +- zingo-rpc/src/blockcache/block.rs | 2 +- zingo-rpc/src/jsonrpc.rs | 2 +- zingo-rpc/src/jsonrpc/connector.rs | 2 +- zingo-rpc/src/jsonrpc/{primitives.rs => response.rs} | 0 zingo-rpc/src/lib.rs | 2 +- zingo-rpc/src/primitives.rs | 1 - zingo-rpc/src/primitives/client.rs | 1 - zingo-rpc/src/queue/manager.rs | 0 zingo-rpc/src/rpc/nymservice.rs | 2 +- zingo-rpc/src/rpc/service.rs | 2 +- zingo-rpc/src/{queue.rs => server.rs} | 0 zingo-rpc/src/{queue => server}/error.rs | 4 ++-- zingo-rpc/src/{queue => server}/ingestor.rs | 6 +++--- zingo-rpc/src/{queue => server}/request.rs | 2 +- zingo-rpc/src/{queue => server}/worker.rs | 4 ++-- 16 files changed, 15 insertions(+), 17 deletions(-) rename zingo-rpc/src/jsonrpc/{primitives.rs => response.rs} (100%) delete mode 100644 zingo-rpc/src/primitives/client.rs delete mode 100644 zingo-rpc/src/queue/manager.rs rename zingo-rpc/src/{queue.rs => server.rs} (100%) rename zingo-rpc/src/{queue => server}/error.rs (90%) rename zingo-rpc/src/{queue => server}/ingestor.rs (97%) rename zingo-rpc/src/{queue => server}/request.rs (98%) rename zingo-rpc/src/{queue => server}/worker.rs (98%) diff --git a/zingo-proxyd/src/nym_server.rs b/zingo-proxyd/src/nym_server.rs index c3cbc08..5ae7715 100644 --- a/zingo-proxyd/src/nym_server.rs +++ b/zingo-proxyd/src/nym_server.rs @@ -12,7 +12,7 @@ use std::sync::{ use nym_sdk::mixnet::{MixnetMessageSender, ReconstructedMessage}; use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; -use zingo_rpc::{nym::client::NymClient, queue::request::ZingoProxyRequest, rpc::GrpcClient}; +use zingo_rpc::{nym::client::NymClient, rpc::GrpcClient, server::request::ZingoProxyRequest}; /// Wrapper struct for a Nym client. 
pub struct NymServer { diff --git a/zingo-rpc/src/blockcache/block.rs b/zingo-rpc/src/blockcache/block.rs index 0265404..f87dde7 100644 --- a/zingo-rpc/src/blockcache/block.rs +++ b/zingo-rpc/src/blockcache/block.rs @@ -9,7 +9,7 @@ use crate::{ CompactSize, ParseFromSlice, }, }, - jsonrpc::{connector::JsonRpcConnector, primitives::GetBlockResponse}, + jsonrpc::{connector::JsonRpcConnector, response::GetBlockResponse}, proto::compact_formats::{ChainMetadata, CompactBlock}, }; use sha2::{Digest, Sha256}; diff --git a/zingo-rpc/src/jsonrpc.rs b/zingo-rpc/src/jsonrpc.rs index 481e80a..b510d84 100644 --- a/zingo-rpc/src/jsonrpc.rs +++ b/zingo-rpc/src/jsonrpc.rs @@ -2,4 +2,4 @@ pub mod connector; pub mod error; -pub mod primitives; +pub mod response; diff --git a/zingo-rpc/src/jsonrpc/connector.rs b/zingo-rpc/src/jsonrpc/connector.rs index b440bec..8690104 100644 --- a/zingo-rpc/src/jsonrpc/connector.rs +++ b/zingo-rpc/src/jsonrpc/connector.rs @@ -11,7 +11,7 @@ use std::sync::atomic::{AtomicI32, Ordering}; use crate::jsonrpc::{ error::JsonRpcConnectorError, - primitives::{ + response::{ BestBlockHashResponse, GetBalanceResponse, GetBlockResponse, GetBlockchainInfoResponse, GetInfoResponse, GetSubtreesResponse, GetTransactionResponse, GetTreestateResponse, GetUtxosResponse, SendTransactionResponse, TxidsResponse, diff --git a/zingo-rpc/src/jsonrpc/primitives.rs b/zingo-rpc/src/jsonrpc/response.rs similarity index 100% rename from zingo-rpc/src/jsonrpc/primitives.rs rename to zingo-rpc/src/jsonrpc/response.rs diff --git a/zingo-rpc/src/lib.rs b/zingo-rpc/src/lib.rs index 1345e0f..4c0d6ac 100644 --- a/zingo-rpc/src/lib.rs +++ b/zingo-rpc/src/lib.rs @@ -8,7 +8,7 @@ pub mod jsonrpc; pub mod nym; pub mod primitives; pub mod proto; -pub mod queue; pub mod rpc; +pub mod server; pub mod utils; pub mod walletrpc; diff --git a/zingo-rpc/src/primitives.rs b/zingo-rpc/src/primitives.rs index 9aabb09..c04b640 100644 --- a/zingo-rpc/src/primitives.rs +++ b/zingo-rpc/src/primitives.rs @@ -5,7 +5,6 @@ pub mod address; pub mod block; pub mod chain; -pub mod client; pub mod error; pub mod height; pub mod transaction; diff --git a/zingo-rpc/src/primitives/client.rs b/zingo-rpc/src/primitives/client.rs deleted file mode 100644 index 8b13789..0000000 --- a/zingo-rpc/src/primitives/client.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/zingo-rpc/src/queue/manager.rs b/zingo-rpc/src/queue/manager.rs deleted file mode 100644 index e69de29..0000000 diff --git a/zingo-rpc/src/rpc/nymservice.rs b/zingo-rpc/src/rpc/nymservice.rs index d04bfd4..acc1f9d 100644 --- a/zingo-rpc/src/rpc/nymservice.rs +++ b/zingo-rpc/src/rpc/nymservice.rs @@ -2,7 +2,7 @@ use prost::Message; -use crate::{queue::request::NymServerRequest, rpc::GrpcClient}; +use crate::{rpc::GrpcClient, server::request::NymServerRequest}; #[cfg(not(feature = "nym_poc"))] use crate::proto::service::compact_tx_streamer_server::CompactTxStreamer; diff --git a/zingo-rpc/src/rpc/service.rs b/zingo-rpc/src/rpc/service.rs index a26e81d..ca01a2e 100644 --- a/zingo-rpc/src/rpc/service.rs +++ b/zingo-rpc/src/rpc/service.rs @@ -6,7 +6,7 @@ use tokio_stream::wrappers::ReceiverStream; use crate::{ blockcache::{block::get_block_from_node, mempool::Mempool}, - jsonrpc::{connector::JsonRpcConnector, primitives::GetTransactionResponse}, + jsonrpc::{connector::JsonRpcConnector, response::GetTransactionResponse}, primitives::{ chain::{ConsensusBranchId, ConsensusBranchIdHex}, height::ChainHeight, diff --git a/zingo-rpc/src/queue.rs b/zingo-rpc/src/server.rs similarity index 100% rename 
from zingo-rpc/src/queue.rs rename to zingo-rpc/src/server.rs diff --git a/zingo-rpc/src/queue/error.rs b/zingo-rpc/src/server/error.rs similarity index 90% rename from zingo-rpc/src/queue/error.rs rename to zingo-rpc/src/server/error.rs index fe1b870..1044948 100644 --- a/zingo-rpc/src/queue/error.rs +++ b/zingo-rpc/src/server/error.rs @@ -1,9 +1,9 @@ -//! Hold error types for the queue and related functionality. +//! Hold error types for the server and related functionality. use std::io; use tokio::sync::mpsc::error::TrySendError; -use crate::{nym::error::NymError, queue::request::ZingoProxyRequest}; +use crate::{nym::error::NymError, server::request::ZingoProxyRequest}; /// Zingo-Proxy request errors. #[derive(Debug, thiserror::Error)] diff --git a/zingo-rpc/src/queue/ingestor.rs b/zingo-rpc/src/server/ingestor.rs similarity index 97% rename from zingo-rpc/src/queue/ingestor.rs rename to zingo-rpc/src/server/ingestor.rs index 13f7ff0..29acc4e 100644 --- a/zingo-rpc/src/queue/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -1,4 +1,4 @@ -//! Holds the ingestor (listener) implementations. +//! Holds the server ingestor (listener) implementations. use std::{ net::SocketAddr, @@ -11,7 +11,7 @@ use tokio::{net::TcpListener, sync::mpsc}; use crate::{ nym::{client::NymClient, error::NymError}, - queue::{error::IngestorError, request::ZingoProxyRequest}, + server::{error::IngestorError, request::ZingoProxyRequest}, }; /// Status of the worker. @@ -21,7 +21,7 @@ use crate::{ pub enum IngestorStatus { /// On hold, due to blockcache / node error. Inactive, - /// Processing requests from the queue. + /// Listening for new requests. Listening, } diff --git a/zingo-rpc/src/queue/request.rs b/zingo-rpc/src/server/request.rs similarity index 98% rename from zingo-rpc/src/queue/request.rs rename to zingo-rpc/src/server/request.rs index 92f8f17..d733c2c 100644 --- a/zingo-rpc/src/queue/request.rs +++ b/zingo-rpc/src/server/request.rs @@ -5,7 +5,7 @@ use std::time::SystemTime; use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; use tokio::net::TcpStream; -use crate::{nym::utils::read_nym_request_data, queue::error::RequestError}; +use crate::{nym::utils::read_nym_request_data, server::error::RequestError}; /// Requests queuing metadata. #[derive(Debug, Clone)] diff --git a/zingo-rpc/src/queue/worker.rs b/zingo-rpc/src/server/worker.rs similarity index 98% rename from zingo-rpc/src/queue/worker.rs rename to zingo-rpc/src/server/worker.rs index f1ed2de..67b06ea 100644 --- a/zingo-rpc/src/queue/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -1,4 +1,4 @@ -//! Holds the queue worker implementation. +//! Holds the server worker implementation. 
use std::sync::{ atomic::{AtomicBool, Ordering}, @@ -14,8 +14,8 @@ use tokio::{ use tonic::transport::Server; use crate::{ - queue::{error::WorkerError, request::ZingoProxyRequest}, rpc::GrpcClient, + server::{error::WorkerError, request::ZingoProxyRequest}, }; #[cfg(not(feature = "nym_poc"))] From b52664bad8b223ccb355761f57f5af43ae677785 Mon Sep 17 00:00:00 2001 From: idky137 Date: Fri, 2 Aug 2024 15:48:39 +0100 Subject: [PATCH 05/18] added dispatcher --- zingo-rpc/src/server.rs | 40 +++++++++++ zingo-rpc/src/server/dispatcher.rs | 106 +++++++++++++++++++++++++++++ zingo-rpc/src/server/error.rs | 8 +++ zingo-rpc/src/server/ingestor.rs | 11 +-- zingo-rpc/src/server/worker.rs | 14 +++- 5 files changed, 172 insertions(+), 7 deletions(-) create mode 100644 zingo-rpc/src/server/dispatcher.rs diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index b9b18bf..1f285b8 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -1,6 +1,46 @@ //! Zingo-Proxy client server. +use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; +use std::sync::{atomic::AtomicBool, Arc}; +use tokio::sync::mpsc; + +use self::{ + dispatcher::NymDispatcher, + ingestor::{NymIngestor, TcpIngestor}, + request::ZingoProxyRequest, + worker::WorkerPool, +}; + +pub mod dispatcher; pub mod error; pub mod ingestor; pub mod request; pub mod worker; + +/// +pub struct Queue { + /// Maximum length of the queue. + max_size: usize, + /// Queue sender. + queue_tx: mpsc::Sender, + /// Queue receiver. + queue_rx: mpsc::Receiver, +} + +/// LightWallet server capable of servicing clients over both http and nym. +pub struct Server { + /// Listens for incoming gRPC requests over HTTP. + tcp_ingestor: TcpIngestor, + /// Listens for incoming gRPC requests over Nym Mixnet. + nym_ingestor: NymIngestor, + /// Sends gRPC responses over Nym Mixnet. + nym_dispatcher: NymDispatcher, + /// Dynamically sized pool of workers. + worker_pool: WorkerPool, + /// Request queue. + request_queue: Queue, + /// Nym response queue. + nym_response_queue: Queue<(Vec, AnonymousSenderTag)>, + /// Represents the Online status of the Server. + pub online: Arc, +} diff --git a/zingo-rpc/src/server/dispatcher.rs b/zingo-rpc/src/server/dispatcher.rs new file mode 100644 index 0000000..985b28a --- /dev/null +++ b/zingo-rpc/src/server/dispatcher.rs @@ -0,0 +1,106 @@ +//! Holds the server dispatcher (replyer) implementations. + +use nym_sdk::mixnet::MixnetMessageSender; +use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; +use tokio::sync::mpsc; + +use crate::{ + nym::{client::NymClient, error::NymError}, + server::error::DispatcherError, +}; + +/// Status of the worker. +#[derive(Debug, Clone)] +pub enum DispatcherStatus { + /// On hold, due to blockcache / node error. + Inactive, + /// Listening for new requests. + Listening, +} + +/// Sends gRPC responses over Nym Mixnet. +pub struct NymDispatcher { + /// Nym Client + dispatcher: NymClient, + /// Used to send requests to the queue. + response_queue: mpsc::Receiver<(Vec, AnonymousSenderTag)>, + /// Used to send requests to the queue. + response_requeue: mpsc::Sender<(Vec, AnonymousSenderTag)>, + /// Represents the Online status of the gRPC server. + online: Arc, + /// Current status of the ingestor. 
+ status: DispatcherStatus, +} + +impl NymDispatcher { + /// Creates a Nym Ingestor + pub async fn spawn( + nym_conf_path: &str, + response_queue: mpsc::Receiver<(Vec, AnonymousSenderTag)>, + response_requeue: mpsc::Sender<(Vec, AnonymousSenderTag)>, + online: Arc, + ) -> Result { + let client = NymClient::spawn(&format!("{}/dispatcher", nym_conf_path)).await?; + Ok(NymDispatcher { + dispatcher: client, + response_queue, + response_requeue, + online, + status: DispatcherStatus::Inactive, + }) + } + + /// Starts Nym service. + pub async fn serve(mut self) -> tokio::task::JoinHandle> { + tokio::task::spawn(async move { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + // TODO Check self.status and wait on server / node if on hold. + self.status = DispatcherStatus::Listening; + loop { + tokio::select! { + _ = interval.tick() => { + if !self.check_online() { + println!("Nym dispatcher shutting down."); + return Ok(()); + } + } + incoming = self.response_queue.recv() => { + match incoming { + Some(response) => { + if !self.check_online() { + println!("Nym dispatcher shutting down."); + return Ok(()); + } + if let Err(nym_e) = self.dispatcher + .client + .send_reply(response.1, response.0.clone()) + .await.map_err(NymError::from) { + // TODO: Convert to use try_send(). + if let Err(e) = self.response_requeue.send(response).await { + eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response: {}\nFatal error! Restarting nym dispatcher.", nym_e, e); + // TODO: Handle error. Restart nym dispatcher. + } + eprintln!("Failed to send response over nym: {}\nResponse requeued, restarting nym dispatcher.", nym_e); + // TODO: Handle error. Restart nym dispatcher. + } + } + None => { + println!("Response queue closed, nym dispatcher shutting down."); + return Ok(()); + } + } + } + } + } + }) + } + + fn check_online(&self) -> bool { + self.online.load(Ordering::SeqCst) + } +} diff --git a/zingo-rpc/src/server/error.rs b/zingo-rpc/src/server/error.rs index 1044948..eb61940 100644 --- a/zingo-rpc/src/server/error.rs +++ b/zingo-rpc/src/server/error.rs @@ -36,6 +36,14 @@ pub enum IngestorError { QueuePushError(#[from] TrySendError), } +/// Zingo-Proxy distpater errors. +#[derive(Debug, thiserror::Error)] +pub enum DispatcherError { + /// Nym based errors. + #[error("Nym error: {0}")] + NymError(#[from] NymError), +} + /// Zingo-Proxy worker errors. #[derive(Debug, thiserror::Error)] pub enum WorkerError { diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index 29acc4e..e4fe030 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -15,8 +15,6 @@ use crate::{ }; /// Status of the worker. -/// -/// TODO: Add duration to each variant. #[derive(Debug, Clone)] pub enum IngestorStatus { /// On hold, due to blockcache / node error. @@ -25,7 +23,7 @@ pub enum IngestorStatus { Listening, } -/// Configuration data for gRPC server. +/// Listens for incoming gRPC requests over HTTP. pub struct TcpIngestor { /// Tcp Listener. ingestor: TcpListener, @@ -75,6 +73,7 @@ impl TcpIngestor { println!("Tcp ingestor shutting down."); return Ok(()); } + // TODO: Convert to use try_send(). if let Err(e) = self.queue.send(ZingoProxyRequest::new_from_grpc(stream)).await { // TODO:: Return queue full tonic status over tcpstream and close (that TcpStream..). 
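The TODO above (and its twin in the Nym ingestor) is about replacing the awaited `send` with `try_send`, so that a full queue is reported back to the client immediately instead of back-pressuring the ingestor task. A minimal sketch of the distinction, assuming tokio's bounded `mpsc` channel (the queue type used at this point in the series) and the tokio `rt`/`macros` features; illustrative only, not part of the patch:

use tokio::sync::mpsc::{self, error::TrySendError};

#[tokio::main]
async fn main() {
    // Bounded queue with room for a single in-flight request.
    let (queue_tx, mut queue_rx) = mpsc::channel::<u64>(1);

    queue_tx.try_send(1).expect("queue has capacity");

    match queue_tx.try_send(2) {
        Ok(()) => println!("request queued"),
        // Queue full: reject immediately, e.g. return a "queue full" tonic status.
        Err(TrySendError::Full(_request)) => println!("queue full, rejecting request"),
        // Queue closed: the server is shutting down.
        Err(TrySendError::Closed(_request)) => println!("queue closed, shutting down"),
    }

    assert_eq!(queue_rx.recv().await, Some(1));
}
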
eprintln!("Failed to send connection: {}", e); @@ -101,7 +100,7 @@ impl TcpIngestor { } } -/// Wrapper struct for a Nym client. +/// Listens for incoming gRPC requests over Nym Mixnet. pub struct NymIngestor { /// Nym Client ingestor: NymClient, @@ -120,7 +119,7 @@ impl NymIngestor { queue: mpsc::Sender, online: Arc, ) -> Result { - let listener = NymClient::spawn(nym_conf_path).await?; + let listener = NymClient::spawn(&format!("{}/ingestor", nym_conf_path)).await?; Ok(NymIngestor { ingestor: listener, queue, @@ -165,12 +164,14 @@ impl NymIngestor { // TODO: Handle RequestError here. let zingo_proxy_request = ZingoProxyRequest::new_from_nym(return_recipient, request_vu8.as_ref())?; + // TODO: Convert to use try_send(). if let Err(e) = self.queue.send(zingo_proxy_request).await { // TODO: Return queue full tonic status over nym mixnet. eprintln!("Failed to send connection: {}", e); } } None => { + // TODO: Error in nym client, handle error here (restart ingestor?). eprintln!("Failed to receive message from Nym network."); if !self.online.load(Ordering::SeqCst) { println!("Nym ingestor shutting down."); diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs index 67b06ea..14c9129 100644 --- a/zingo-rpc/src/server/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -100,7 +100,7 @@ pub struct Worker { grpc_client: GrpcClient, /// Workers current status. status: WorkerStatus, - /// Represents the Online status of the gRPC server. + /// Represents the Online status of the Worker. pub online: Arc, } @@ -133,7 +133,7 @@ impl Worker { /// Starts queue worker service routine. /// - /// TODO: Add requeue on error. + /// TODO: Add requeue logic for node errors. pub async fn serve(mut self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. @@ -218,3 +218,13 @@ impl Worker { self.online.load(Ordering::SeqCst) } } + +/// Dynamically sized pool of workers. +pub struct WorkerPool { + /// Maximun number of concurrent workers allowed. + max_size: usize, + /// Workers currently in the pool + workers: Vec, + /// Represents the Online status of the WorkerPool. + pub online: Arc, +} From f20ea1c62e6823fd60286c9093d8cea021d289e0 Mon Sep 17 00:00:00 2001 From: idky137 Date: Sun, 4 Aug 2024 19:32:36 +0100 Subject: [PATCH 06/18] added server structs --- zingo-rpc/src/server.rs | 63 +++++++++++++++++++++++++++--- zingo-rpc/src/server/dispatcher.rs | 10 +++++ zingo-rpc/src/server/ingestor.rs | 20 ++++++++++ zingo-rpc/src/server/worker.rs | 10 ----- 4 files changed, 87 insertions(+), 16 deletions(-) diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 1f285b8..6fbd103 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -8,7 +8,7 @@ use self::{ dispatcher::NymDispatcher, ingestor::{NymIngestor, TcpIngestor}, request::ZingoProxyRequest, - worker::WorkerPool, + worker::Worker, }; pub mod dispatcher; @@ -17,7 +17,7 @@ pub mod ingestor; pub mod request; pub mod worker; -/// +/// Queue with max length. pub struct Queue { /// Maximum length of the queue. 
max_size: usize, @@ -27,20 +27,71 @@ pub struct Queue { queue_rx: mpsc::Receiver, } +impl Queue { + // Creates a new queue + // pub fn spawn(max_size) -> Self {} + + // Returns a queue transmitter + // pub fn tx(&self) -> mpsc::Sender {} + + // Returns a queue receiver + // pub fn rx(&self) -> mpsc::Receiver {} + + // Returns the current length of the queue + // pub fn length(&self) -> usize {} +} + +/// Dynamically sized pool of workers. +pub struct WorkerPool { + /// Maximun number of concurrent workers allowed. + max_size: usize, + /// Minimum number of workers kept running on stanby. + idle_size: usize, + /// Workers currently in the pool + workers: Vec, + /// Represents the Online status of the WorkerPool. + pub online: Arc, +} + +impl WorkerPool { + // Creates a new worker pool with idle_workers in it. + // pub fn spawn(max_size, idle_size, online) -> Self {} + + // Sets workers in the worker pool to start servicing the queue. + // pub fn serve(&self) -> Vec>> {} + + // Adds a worker to the worker pool, returns error if the pool is already at max size. + // pub fn add_worker(&self) -> tokio::task::JoinHandle> {} + + // Checks workers on standby, closes workers that have been on standby for longer than 30s(may need to change). + // pub fn check_workers(&self) {} +} + /// LightWallet server capable of servicing clients over both http and nym. pub struct Server { /// Listens for incoming gRPC requests over HTTP. - tcp_ingestor: TcpIngestor, + tcp_ingestor: Option, /// Listens for incoming gRPC requests over Nym Mixnet. - nym_ingestor: NymIngestor, + nym_ingestor: Option, /// Sends gRPC responses over Nym Mixnet. - nym_dispatcher: NymDispatcher, + nym_dispatcher: Option, /// Dynamically sized pool of workers. worker_pool: WorkerPool, /// Request queue. request_queue: Queue, /// Nym response queue. - nym_response_queue: Queue<(Vec, AnonymousSenderTag)>, + nym_response_queue: Option, AnonymousSenderTag)>>, /// Represents the Online status of the Server. pub online: Arc, } + +impl Server { + // Spawns a new server. + // pub fn Spawn() -> Self {} + + // Starts the server. + // pub fn Start(&self) {} + + // Returns the status of the server and its parts, to be consumed by system printout. + // pub fn Status(&self) {} +} diff --git a/zingo-rpc/src/server/dispatcher.rs b/zingo-rpc/src/server/dispatcher.rs index 985b28a..a29599a 100644 --- a/zingo-rpc/src/server/dispatcher.rs +++ b/zingo-rpc/src/server/dispatcher.rs @@ -100,6 +100,16 @@ impl NymDispatcher { }) } + /// Ends the dispatcher. + pub async fn shutdown(self) { + todo!() + } + + /// Returns the dispatchers current status. + pub fn status(&self) -> DispatcherStatus { + self.status.clone() + } + fn check_online(&self) -> bool { self.online.load(Ordering::SeqCst) } diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index e4fe030..09b6aa1 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -95,6 +95,16 @@ impl TcpIngestor { }) } + /// Ends the ingestor. + pub async fn shutdown(self) { + todo!() + } + + /// Returns the ingestor current status. + pub fn status(&self) -> IngestorStatus { + self.status.clone() + } + fn check_online(&self) -> bool { self.online.load(Ordering::SeqCst) } @@ -185,6 +195,16 @@ impl NymIngestor { }) } + /// Ends the ingestor. + pub async fn shutdown(self) { + todo!() + } + + /// Returns the ingestor current status. 
+ pub fn status(&self) -> IngestorStatus { + self.status.clone() + } + fn check_online(&self) -> bool { self.online.load(Ordering::SeqCst) } diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs index 14c9129..f676695 100644 --- a/zingo-rpc/src/server/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -218,13 +218,3 @@ impl Worker { self.online.load(Ordering::SeqCst) } } - -/// Dynamically sized pool of workers. -pub struct WorkerPool { - /// Maximun number of concurrent workers allowed. - max_size: usize, - /// Workers currently in the pool - workers: Vec, - /// Represents the Online status of the WorkerPool. - pub online: Arc, -} From 2d55d1cd7b948020013f51b646f776dbf1c7b04e Mon Sep 17 00:00:00 2001 From: idky137 Date: Mon, 5 Aug 2024 14:55:54 +0100 Subject: [PATCH 07/18] started adding queue --- zingo-rpc/src/server.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 6fbd103..7a93bd3 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -2,7 +2,7 @@ use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; use std::sync::{atomic::AtomicBool, Arc}; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, Semaphore}; use self::{ dispatcher::NymDispatcher, @@ -19,8 +19,8 @@ pub mod worker; /// Queue with max length. pub struct Queue { - /// Maximum length of the queue. - max_size: usize, + /// Used to count current messages in queue. + semaphore: Arc, /// Queue sender. queue_tx: mpsc::Sender, /// Queue receiver. @@ -28,8 +28,17 @@ pub struct Queue { } impl Queue { - // Creates a new queue - // pub fn spawn(max_size) -> Self {} + /// Creates a new queue + pub fn spawn(max_size: usize) -> Self { + let (queue_tx, queue_rx) = mpsc::channel(max_size); + let semaphore = Arc::new(Semaphore::new(max_size)); + + Queue { + queue_tx, + queue_rx, + semaphore, + } + } // Returns a queue transmitter // pub fn tx(&self) -> mpsc::Sender {} From 329f20dad2205f5c5bd2b564eeedee419e0a6992 Mon Sep 17 00:00:00 2001 From: idky137 Date: Mon, 5 Aug 2024 20:44:30 +0100 Subject: [PATCH 08/18] implemented queue.rs, reworked ingestor and dispatcher to use QueueSender and QueueReceiver. 
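tokio's `mpsc::Receiver` cannot be cloned, so a single request queue cannot be shared directly across a dynamically sized worker pool. The patch below therefore switches the queue internals to `crossbeam-channel`, whose bounded channel has cloneable senders and cloneable receivers plus non-blocking `try_send`/`try_recv`, with an atomic counter layered on top to expose the queue length. A minimal sketch of the multi-consumer behaviour being relied on (illustrative only, not part of the patch):

use crossbeam_channel::bounded;

fn main() {
    // Bounded, multi-producer / multi-consumer queue.
    let (queue_tx, queue_rx) = bounded::<u32>(2);

    // Receivers are Clone, so several workers can pull from the same queue.
    let worker_rx = queue_rx.clone();

    queue_tx.try_send(1).expect("queue has capacity");
    queue_tx.try_send(2).expect("queue has capacity");
    // A third send fails fast rather than blocking (maps to QueueError::QueueFull).
    assert!(queue_tx.try_send(3).is_err());

    assert_eq!(queue_rx.try_recv().ok(), Some(1));
    assert_eq!(worker_rx.try_recv().ok(), Some(2));
}
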
--- Cargo.lock | 1 + zingo-rpc/Cargo.toml | 1 + zingo-rpc/src/server.rs | 75 ++------------- zingo-rpc/src/server/dispatcher.rs | 48 ++++++---- zingo-rpc/src/server/error.rs | 14 +++ zingo-rpc/src/server/ingestor.rs | 62 +++++++----- zingo-rpc/src/server/queue.rs | 149 +++++++++++++++++++++++++++++ zingo-rpc/src/server/worker.rs | 16 ++-- zingo-rpc/src/server/workerpool.rs | 54 +++++++++++ 9 files changed, 302 insertions(+), 118 deletions(-) create mode 100644 zingo-rpc/src/server/queue.rs create mode 100644 zingo-rpc/src/server/workerpool.rs diff --git a/Cargo.lock b/Cargo.lock index f84beea..5044fe0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7215,6 +7215,7 @@ dependencies = [ "base64 0.13.1", "byteorder", "bytes 1.6.0", + "crossbeam-channel", "futures", "hex 0.4.3", "http", diff --git a/zingo-rpc/Cargo.toml b/zingo-rpc/Cargo.toml index 1b80a49..3076dcc 100644 --- a/zingo-rpc/Cargo.toml +++ b/zingo-rpc/Cargo.toml @@ -48,6 +48,7 @@ futures = "0.3.30" byteorder = "1" # "1.5" sha2 = "0.10" # "0.10.8" async-stream = "0.3" # "0.3.5" +crossbeam-channel = "0.5" [build-dependencies] tonic-build = { version = "0.10", features = ["prost"] } # "0.12" diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 7a93bd3..86b4dcd 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -2,79 +2,22 @@ use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; use std::sync::{atomic::AtomicBool, Arc}; -use tokio::sync::{mpsc, Semaphore}; - -use self::{ - dispatcher::NymDispatcher, - ingestor::{NymIngestor, TcpIngestor}, - request::ZingoProxyRequest, - worker::Worker, -}; pub mod dispatcher; pub mod error; pub mod ingestor; +pub mod queue; pub mod request; pub mod worker; +pub mod workerpool; -/// Queue with max length. -pub struct Queue { - /// Used to count current messages in queue. - semaphore: Arc, - /// Queue sender. - queue_tx: mpsc::Sender, - /// Queue receiver. - queue_rx: mpsc::Receiver, -} - -impl Queue { - /// Creates a new queue - pub fn spawn(max_size: usize) -> Self { - let (queue_tx, queue_rx) = mpsc::channel(max_size); - let semaphore = Arc::new(Semaphore::new(max_size)); - - Queue { - queue_tx, - queue_rx, - semaphore, - } - } - - // Returns a queue transmitter - // pub fn tx(&self) -> mpsc::Sender {} - - // Returns a queue receiver - // pub fn rx(&self) -> mpsc::Receiver {} - - // Returns the current length of the queue - // pub fn length(&self) -> usize {} -} - -/// Dynamically sized pool of workers. -pub struct WorkerPool { - /// Maximun number of concurrent workers allowed. - max_size: usize, - /// Minimum number of workers kept running on stanby. - idle_size: usize, - /// Workers currently in the pool - workers: Vec, - /// Represents the Online status of the WorkerPool. - pub online: Arc, -} - -impl WorkerPool { - // Creates a new worker pool with idle_workers in it. - // pub fn spawn(max_size, idle_size, online) -> Self {} - - // Sets workers in the worker pool to start servicing the queue. - // pub fn serve(&self) -> Vec>> {} - - // Adds a worker to the worker pool, returns error if the pool is already at max size. - // pub fn add_worker(&self) -> tokio::task::JoinHandle> {} - - // Checks workers on standby, closes workers that have been on standby for longer than 30s(may need to change). 
- // pub fn check_workers(&self) {} -} +use self::{ + dispatcher::NymDispatcher, + ingestor::{NymIngestor, TcpIngestor}, + queue::Queue, + request::ZingoProxyRequest, + workerpool::WorkerPool, +}; /// LightWallet server capable of servicing clients over both http and nym. pub struct Server { diff --git a/zingo-rpc/src/server/dispatcher.rs b/zingo-rpc/src/server/dispatcher.rs index a29599a..b072cfc 100644 --- a/zingo-rpc/src/server/dispatcher.rs +++ b/zingo-rpc/src/server/dispatcher.rs @@ -10,7 +10,8 @@ use tokio::sync::mpsc; use crate::{ nym::{client::NymClient, error::NymError}, - server::error::DispatcherError, + server::error::{DispatcherError, QueueError}, + server::queue::{QueueReceiver, QueueSender}, }; /// Status of the worker. @@ -27,9 +28,9 @@ pub struct NymDispatcher { /// Nym Client dispatcher: NymClient, /// Used to send requests to the queue. - response_queue: mpsc::Receiver<(Vec, AnonymousSenderTag)>, + response_queue: QueueReceiver<(Vec, AnonymousSenderTag)>, /// Used to send requests to the queue. - response_requeue: mpsc::Sender<(Vec, AnonymousSenderTag)>, + response_requeue: QueueSender<(Vec, AnonymousSenderTag)>, /// Represents the Online status of the gRPC server. online: Arc, /// Current status of the ingestor. @@ -40,8 +41,8 @@ impl NymDispatcher { /// Creates a Nym Ingestor pub async fn spawn( nym_conf_path: &str, - response_queue: mpsc::Receiver<(Vec, AnonymousSenderTag)>, - response_requeue: mpsc::Sender<(Vec, AnonymousSenderTag)>, + response_queue: QueueReceiver<(Vec, AnonymousSenderTag)>, + response_requeue: QueueSender<(Vec, AnonymousSenderTag)>, online: Arc, ) -> Result { let client = NymClient::spawn(&format!("{}/dispatcher", nym_conf_path)).await?; @@ -57,7 +58,7 @@ impl NymDispatcher { /// Starts Nym service. pub async fn serve(mut self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + // NOTE: This interval may need to be changed or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); // TODO Check self.status and wait on server / node if on hold. self.status = DispatcherStatus::Listening; @@ -69,29 +70,34 @@ impl NymDispatcher { return Ok(()); } } - incoming = self.response_queue.recv() => { + incoming = self.response_queue.listen() => { match incoming { - Some(response) => { - if !self.check_online() { - println!("Nym dispatcher shutting down."); - return Ok(()); - } + Ok(response) => { if let Err(nym_e) = self.dispatcher .client .send_reply(response.1, response.0.clone()) .await.map_err(NymError::from) { - // TODO: Convert to use try_send(). - if let Err(e) = self.response_requeue.send(response).await { - eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response: {}\nFatal error! Restarting nym dispatcher.", nym_e, e); - // TODO: Handle error. Restart nym dispatcher. + match self.response_requeue.try_send(response) { + Ok(_) => { + eprintln!("Failed to send response over nym: {}\nResponse requeued, restarting nym dispatcher.", nym_e); + // TODO: Handle error. Restart nym dispatcher. + } + Err(QueueError::QueueFull(_request)) => { + eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response due to full response queue.\nFatal error! Restarting nym dispatcher.", nym_e); + // TODO: Handle queue full error here (start up second dispatcher?). 
Restart nym dispatcher + } + Err(_e) => { + eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response due to the queue being closed.\nFatal error! Nym dispatcher shutting down..", nym_e); + // TODO: Handle queue closed error here. (return correct error type?) + return Ok(()); //Return Err! + } } - eprintln!("Failed to send response over nym: {}\nResponse requeued, restarting nym dispatcher.", nym_e); - // TODO: Handle error. Restart nym dispatcher. } } - None => { - println!("Response queue closed, nym dispatcher shutting down."); - return Ok(()); + Err(_e) => { + //TODO: Handle this error here (return correct error type?) + eprintln!("Response queue closed, nym dispatcher shutting down."); + return Ok(()); // Return Err! } } } diff --git a/zingo-rpc/src/server/error.rs b/zingo-rpc/src/server/error.rs index eb61940..e5272f8 100644 --- a/zingo-rpc/src/server/error.rs +++ b/zingo-rpc/src/server/error.rs @@ -5,6 +5,20 @@ use tokio::sync::mpsc::error::TrySendError; use crate::{nym::error::NymError, server::request::ZingoProxyRequest}; +/// Zingo-Proxy queue errors. +#[derive(Debug, thiserror::Error)] +pub enum QueueError { + /// Returned when a requests is pushed to a full queue. + #[error("Queue Full")] + QueueFull(T), + /// Returned when a worker or dispatcher tries to receive from an empty queue. + #[error("Queue Empty")] + QueueEmpty, + /// Returned when a worker or dispatcher tries to receive from a closed queue. + #[error("Queue Disconnected")] + QueueClosed, +} + /// Zingo-Proxy request errors. #[derive(Debug, thiserror::Error)] pub enum RequestError { diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index 09b6aa1..4436ef2 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -7,11 +7,15 @@ use std::{ Arc, }, }; -use tokio::{net::TcpListener, sync::mpsc}; +use tokio::net::TcpListener; use crate::{ nym::{client::NymClient, error::NymError}, - server::{error::IngestorError, request::ZingoProxyRequest}, + server::{ + error::{IngestorError, QueueError}, + queue::QueueSender, + request::ZingoProxyRequest, + }, }; /// Status of the worker. @@ -28,7 +32,7 @@ pub struct TcpIngestor { /// Tcp Listener. ingestor: TcpListener, /// Used to send requests to the queue. - queue: mpsc::Sender, + queue: QueueSender, /// Represents the Online status of the gRPC server. online: Arc, /// Current status of the ingestor. @@ -39,7 +43,7 @@ impl TcpIngestor { /// Creates a Tcp Ingestor. pub async fn spawn( listen_addr: SocketAddr, - queue: mpsc::Sender, + queue: QueueSender, online: Arc, ) -> Result { let listener = TcpListener::bind(listen_addr).await?; @@ -54,9 +58,9 @@ impl TcpIngestor { /// Starts Tcp service. pub fn serve(mut self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + // NOTE: This interval may need to be changed or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - // TODO Check self.status and wait on server / node if on hold. + // TODO Check blockcache sync status and wait on server / node if on hold. self.status = IngestorStatus::Listening; loop { tokio::select! 
{ @@ -67,25 +71,31 @@ impl TcpIngestor { } } incoming = self.ingestor.accept() => { + if !self.check_online() { + println!("Tcp ingestor shutting down."); + return Ok(()); + } match incoming { Ok((stream, _)) => { if !self.check_online() { println!("Tcp ingestor shutting down."); return Ok(()); } - // TODO: Convert to use try_send(). - if let Err(e) = self.queue.send(ZingoProxyRequest::new_from_grpc(stream)).await { - // TODO:: Return queue full tonic status over tcpstream and close (that TcpStream..). - eprintln!("Failed to send connection: {}", e); + match self.queue.try_send(ZingoProxyRequest::new_from_grpc(stream)) { + Ok(_) => {} + Err(QueueError::QueueFull(_request)) => { + eprintln!("Queue Full."); + // TODO: Return queue full tonic status over tcpstream and close (that TcpStream..). + } + Err(e) => { + eprintln!("Queue Closed. Failed to send request to queue: {}", e); + // TODO: Handle queue closed error here. + } } } Err(e) => { - // TODO: Handle error here (count errors and restart ingestor / proxy or initiate shotdown?) eprintln!("Failed to accept connection with client: {}", e); - if !self.check_online() { - println!("Tcp ingestor shutting down."); - return Ok(()); - } + // TODO: Handle failed connection errors here (count errors and restart ingestor / proxy or initiate shotdown?) continue; } } @@ -115,7 +125,7 @@ pub struct NymIngestor { /// Nym Client ingestor: NymClient, /// Used to send requests to the queue. - queue: mpsc::Sender, + queue: QueueSender, /// Represents the Online status of the gRPC server. online: Arc, /// Current status of the ingestor. @@ -126,7 +136,7 @@ impl NymIngestor { /// Creates a Nym Ingestor pub async fn spawn( nym_conf_path: &str, - queue: mpsc::Sender, + queue: QueueSender, online: Arc, ) -> Result { let listener = NymClient::spawn(&format!("{}/ingestor", nym_conf_path)).await?; @@ -143,7 +153,7 @@ impl NymIngestor { tokio::task::spawn(async move { // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - // TODO Check self.status and wait on server / node if on hold. + // TODO Check blockcache sync status and wait on server / node if on hold. self.status = IngestorStatus::Listening; loop { @@ -161,7 +171,7 @@ impl NymIngestor { println!("Nym ingestor shutting down."); return Ok(()); } - // NOTE / TODO: POC server checked for empty emssages here (if request.is_empty()..). Could be required here. + // NOTE / TODO: POC server checked for empty emssages here (if request.is_empty()). Could be required here... // TODO: Handle EmptyMessageError here. let request_vu8 = request .first() @@ -174,10 +184,16 @@ impl NymIngestor { // TODO: Handle RequestError here. let zingo_proxy_request = ZingoProxyRequest::new_from_nym(return_recipient, request_vu8.as_ref())?; - // TODO: Convert to use try_send(). - if let Err(e) = self.queue.send(zingo_proxy_request).await { - // TODO: Return queue full tonic status over nym mixnet. - eprintln!("Failed to send connection: {}", e); + match self.queue.try_send(zingo_proxy_request) { + Ok(_) => {} + Err(QueueError::QueueFull(_request)) => { + eprintln!("Queue Full."); + // TODO: Return queue full tonic status over mixnet. + } + Err(e) => { + eprintln!("Queue Closed. Failed to send request to queue: {}", e); + // TODO: Handle queue closed error here. 
+ } } } None => { diff --git a/zingo-rpc/src/server/queue.rs b/zingo-rpc/src/server/queue.rs new file mode 100644 index 0000000..d615a56 --- /dev/null +++ b/zingo-rpc/src/server/queue.rs @@ -0,0 +1,149 @@ +//! Zingo-Indexer queue implementation. + +use crossbeam_channel::{bounded, Receiver, Sender}; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +use crate::server::error::QueueError; + +/// Queue with max length. +#[derive(Debug, Clone)] +pub struct Queue { + /// Used to track current messages in the queue. + message_count: Arc, + /// Queue sender. + queue_tx: QueueSender, + /// Queue receiver. + queue_rx: QueueReceiver, +} + +impl Queue { + /// Creates a new queue with a maximum size. + pub fn new(max_length: usize) -> Self { + let (queue_tx, queue_rx) = bounded(max_length); + let message_count = Arc::new(AtomicUsize::new(0)); + + Queue { + message_count: message_count.clone(), + queue_tx: QueueSender { + inner: queue_tx, + message_count: message_count.clone(), + }, + queue_rx: QueueReceiver { + inner: queue_rx, + message_count, + }, + } + } + + /// Returns a queue transmitter. + pub fn tx(&self) -> QueueSender { + self.queue_tx.clone() + } + + /// Returns a queue receiver. + pub fn rx(&self) -> QueueReceiver { + self.queue_rx.clone() + } + + /// Returns the current length of the queue. + pub fn queue_length(&self) -> usize { + self.message_count.load(Ordering::SeqCst) + } +} + +/// Sends messages to a queue. +#[derive(Debug)] +pub struct QueueSender { + /// Crossbeam_Channel Sender. + inner: Sender, + /// Used to track current messages in the queue. + message_count: Arc, +} + +impl Clone for QueueSender { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + message_count: Arc::clone(&self.message_count), + } + } +} + +impl QueueSender { + /// Tries to add a request to the queue, updating the queue size. + pub fn try_send(&self, message: T) -> Result<(), QueueError> { + match self.inner.try_send(message) { + Ok(_) => { + self.message_count.fetch_add(1, Ordering::SeqCst); + Ok(()) + } + Err(crossbeam_channel::TrySendError::Full(t)) => Err(QueueError::QueueFull(t)), + Err(crossbeam_channel::TrySendError::Disconnected(_)) => Err(QueueError::QueueClosed), + } + } + + /// Returns the current length of the queue. + pub fn queue_length(&self) -> usize { + self.message_count.load(Ordering::SeqCst) + } +} + +/// Receives messages from a queue. +#[derive(Debug)] +pub struct QueueReceiver { + /// Crossbeam_Channel Receiver. + inner: Receiver, + /// Used to track current messages in the queue. + message_count: Arc, +} + +impl Clone for QueueReceiver { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + message_count: Arc::clone(&self.message_count), + } + } +} + +impl QueueReceiver { + /// Try to receive a request from the queue, updatig queue size. + pub fn try_recv(&self) -> Result> { + match self.inner.try_recv() { + Ok(message) => { + self.message_count.fetch_sub(1, Ordering::SeqCst); + Ok(message) + } + Err(crossbeam_channel::TryRecvError::Empty) => Err(QueueError::QueueEmpty), + Err(crossbeam_channel::TryRecvError::Disconnected) => Err(QueueError::QueueClosed), + } + } + + /// Listens indefinately for an incoming message on the queue. Returns message if received or error if queue is closed. + pub async fn listen(&self) -> Result> { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. 
+ let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + loop { + match self.try_recv() { + Ok(message) => { + return Ok(message); + } + Err(QueueError::QueueEmpty) => { + interval.tick(); + continue; + } + Err(e) => { + return Err(e); + } + } + } + } + + /// Returns the current length of the queue. + pub fn queue_length(&self) -> usize { + self.message_count.load(Ordering::SeqCst) + } +} diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs index f676695..7a6060a 100644 --- a/zingo-rpc/src/server/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -94,8 +94,8 @@ pub struct Worker { queue: mpsc::Receiver, /// Used to requeue requests. _requeue: mpsc::Sender, - /// Used to send responses to the nym_responder. - nym_responder: mpsc::Sender<(Vec, AnonymousSenderTag)>, + /// Used to send responses to the nym_dispatcher. + nym_response_queue: mpsc::Sender<(Vec, AnonymousSenderTag)>, /// gRPC client used for processing requests received over http. grpc_client: GrpcClient, /// Workers current status. @@ -110,25 +110,25 @@ impl Worker { worker_id: usize, queue: mpsc::Receiver, _requeue: mpsc::Sender, - nym_responder: mpsc::Sender<(Vec, AnonymousSenderTag)>, + nym_response_queue: mpsc::Sender<(Vec, AnonymousSenderTag)>, lightwalletd_uri: Uri, zebrad_uri: Uri, online: Arc, - ) -> Result { + ) -> Self { let grpc_client = GrpcClient { lightwalletd_uri, zebrad_uri, online: online.clone(), }; - Ok(Worker { + Worker { worker_id, queue, _requeue, - nym_responder, + nym_response_queue, grpc_client, status: WorkerStatus::new(StatusType::Spawning), online, - }) + } } /// Starts queue worker service routine. @@ -172,7 +172,7 @@ impl Worker { .process_nym_request(&request) .await { Ok(response) => { - if let Err(e) = self.nym_responder.send((response, request.get_request().metadata())).await { + if let Err(e) = self.nym_response_queue.send((response, request.get_request().metadata())).await { // TODO:: Handle this error! eprintln!("Failed to send response to nym responder: {}", e); } diff --git a/zingo-rpc/src/server/workerpool.rs b/zingo-rpc/src/server/workerpool.rs new file mode 100644 index 0000000..ff6ce82 --- /dev/null +++ b/zingo-rpc/src/server/workerpool.rs @@ -0,0 +1,54 @@ +//! Holds the server workerpool implementation. + +use std::sync::{atomic::AtomicBool, Arc}; + +use crate::server::worker::Worker; + +/// Dynamically sized pool of workers. +pub struct WorkerPool { + /// Maximun number of concurrent workers allowed. + max_size: usize, + /// Minimum number of workers kept running on stanby. + idle_size: usize, + /// Workers currently in the pool + workers: Vec, + /// Represents the Online status of the WorkerPool. + pub online: Arc, +} + +impl WorkerPool { + // /// Creates a new worker pool with idle_workers in it. + // pub async fn spawn(max_size: usize, idle_size: usize, online: Arc) -> Self { + // let workers: Vec = Vec::with_capacity(max_size); + // for i in 0..idle_size { + // workers.push( + // Worker::spawn( + // i, + // queue, + // _requeue, + // nym_response_queue, + // lightwalletd_uri, + // zebrad_uri, + // online.clone(), + // ) + // .await, + // ); + // } + + // WorkerPool { + // max_size, + // idle_size, + // workers, + // online, + // } + // } + + // Sets workers in the worker pool to start servicing the queue. + // pub fn serve(&self) -> Vec>> {} + + // Adds a worker to the worker pool, returns error if the pool is already at max size. 
+ // pub fn add_worker(&self) -> tokio::task::JoinHandle> {} + + // Checks workers on standby, closes workers that have been on standby for longer than 30s(may need to change). + // pub fn check_workers(&self) {} +} From 3f7c7c25ed910549cb341017378023a03a965992 Mon Sep 17 00:00:00 2001 From: idky137 Date: Wed, 7 Aug 2024 01:10:37 +0100 Subject: [PATCH 09/18] adding server --- zingo-rpc/src/server.rs | 335 ++++++++++++++++++++++++++- zingo-rpc/src/server/dispatcher.rs | 33 ++- zingo-rpc/src/server/error.rs | 32 +++ zingo-rpc/src/server/ingestor.rs | 73 +++--- zingo-rpc/src/server/queue.rs | 10 +- zingo-rpc/src/server/worker.rs | 357 +++++++++++++++++++++++------ zingo-rpc/src/server/workerpool.rs | 54 ----- 7 files changed, 719 insertions(+), 175 deletions(-) delete mode 100644 zingo-rpc/src/server/workerpool.rs diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 86b4dcd..268b30d 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -1,7 +1,14 @@ //! Zingo-Proxy client server. +use http::Uri; use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; -use std::sync::{atomic::AtomicBool, Arc}; +use std::{ + net::SocketAddr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; pub mod dispatcher; pub mod error; @@ -9,16 +16,39 @@ pub mod ingestor; pub mod queue; pub mod request; pub mod worker; -pub mod workerpool; use self::{ - dispatcher::NymDispatcher, - ingestor::{NymIngestor, TcpIngestor}, + dispatcher::{DispatcherStatus, NymDispatcher}, + error::{DispatcherError, IngestorError, ServerError, WorkerError}, + ingestor::{IngestorStatus, NymIngestor, TcpIngestor}, queue::Queue, request::ZingoProxyRequest, - workerpool::WorkerPool, + worker::{WorkerPool, WorkerStatusType}, }; +/// +#[derive(Debug, PartialEq, Clone)] +pub struct ServerStatus { + tcp_ingestor_status: IngestorStatus, + nym_ingestor_status: IngestorStatus, + nym_dispatcher_status: DispatcherStatus, + workerpool_status: (usize, usize, Vec), + request_queue_status: (usize, usize), + nym_response_queue_status: (usize, usize), +} +/// Status of the server. +#[derive(Debug, PartialEq, Clone)] +pub enum ServerStatusType { + /// Running initial startup routine. + Spawning(ServerStatus), + /// Processing incoming requests. + Active(ServerStatus), + /// Waiting for node / blockcache to sync. + Hold(ServerStatus), + /// Running shutdown routine. + Closing(ServerStatus), +} + /// LightWallet server capable of servicing clients over both http and nym. pub struct Server { /// Listens for incoming gRPC requests over HTTP. @@ -32,18 +62,299 @@ pub struct Server { /// Request queue. request_queue: Queue, /// Nym response queue. - nym_response_queue: Option, AnonymousSenderTag)>>, + nym_response_queue: Queue<(Vec, AnonymousSenderTag)>, + /// Servers current status. + status: ServerStatusType, /// Represents the Online status of the Server. pub online: Arc, } impl Server { - // Spawns a new server. - // pub fn Spawn() -> Self {} + /// Spawns a new server. 
+ pub async fn spawn( + tcp_active: bool, + tcp_ingestor_listen_addr: SocketAddr, + nym_active: bool, + nym_conf_path: &str, + lightwalletd_uri: Uri, + zebrad_uri: Uri, + max_queue_size: usize, + max_worker_pool_size: usize, + idle_worker_pool_size: usize, + online: Arc, + ) -> Result { + let ( + request_queue, + nym_response_queue, + tcp_ingestor, + nym_ingestor, + nym_dispatcher, + worker_pool, + ) = match (tcp_active, nym_active) { + (false, false) => Err(ServerError::ServerConfigError( + "Cannot start server with no ingestors selected, at least one of nym or tcp must be set to active in conf.".to_string(), + )), + (false, true) => { + let request_queue = Queue::new(max_queue_size); + let nym_response_queue = Queue::new(max_queue_size); + let nym_ingestor = Some( + NymIngestor::spawn(nym_conf_path, request_queue.tx().clone(), online.clone()) + .await?, + ); + let nym_dispatcher = Some( + NymDispatcher::spawn( + nym_conf_path, + nym_response_queue.rx().clone(), + nym_response_queue.tx().clone(), + online.clone(), + ) + .await?, + ); + let worker_pool = WorkerPool::spawn( + max_worker_pool_size, + idle_worker_pool_size, + request_queue.rx().clone(), + request_queue.tx().clone(), + nym_response_queue.tx().clone(), + lightwalletd_uri, + zebrad_uri, + online.clone(), + ) + .await; + Ok(( + request_queue, + nym_response_queue, + None, + nym_ingestor, + nym_dispatcher, + worker_pool, + )) + } + (true, false) => { + let request_queue = Queue::new(max_queue_size); + let nym_response_queue = Queue::new(max_queue_size); + let tcp_ingestor = Some( + TcpIngestor::spawn( + tcp_ingestor_listen_addr, + request_queue.tx().clone(), + online.clone(), + ) + .await?, + ); + let worker_pool = WorkerPool::spawn( + max_worker_pool_size, + idle_worker_pool_size, + request_queue.rx().clone(), + request_queue.tx().clone(), + nym_response_queue.tx().clone(), + lightwalletd_uri, + zebrad_uri, + online.clone(), + ) + .await; + Ok(( + request_queue, + nym_response_queue, + tcp_ingestor, + None, + None, + worker_pool, + )) + } + (true, true) => { + let request_queue = Queue::new(max_queue_size); + let nym_response_queue = Queue::new(max_queue_size); + let tcp_ingestor = Some( + TcpIngestor::spawn( + tcp_ingestor_listen_addr, + request_queue.tx().clone(), + online.clone(), + ) + .await?, + ); + let nym_ingestor = Some( + NymIngestor::spawn(nym_conf_path, request_queue.tx().clone(), online.clone()) + .await?, + ); + let nym_dispatcher = Some( + NymDispatcher::spawn( + nym_conf_path, + nym_response_queue.rx().clone(), + nym_response_queue.tx().clone(), + online.clone(), + ) + .await?, + ); + let worker_pool = WorkerPool::spawn( + max_worker_pool_size, + idle_worker_pool_size, + request_queue.rx().clone(), + request_queue.tx().clone(), + nym_response_queue.tx().clone(), + lightwalletd_uri, + zebrad_uri, + online.clone(), + ) + .await; + Ok(( + request_queue, + nym_response_queue, + tcp_ingestor, + nym_ingestor, + nym_dispatcher, + worker_pool, + )) + } + }?; + let status = ServerStatusType::Spawning(ServerStatus { + tcp_ingestor_status: IngestorStatus::Inactive, + nym_ingestor_status: IngestorStatus::Inactive, + nym_dispatcher_status: DispatcherStatus::Inactive, + workerpool_status: ( + idle_worker_pool_size, + max_worker_pool_size, + vec![WorkerStatusType::Spawning; worker_pool.workers()], + ), + request_queue_status: (0, max_queue_size), + nym_response_queue_status: (0, max_queue_size), + }); + Ok(Server { + tcp_ingestor, + nym_ingestor, + nym_dispatcher, + worker_pool, + request_queue, + nym_response_queue, + status, + 
online, + }) + } + + /// Starts the server. + pub async fn serve(mut self) -> tokio::task::JoinHandle> { + tokio::task::spawn(async move { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + let mut nym_dispatcher_handle = None; + let mut nym_ingestor_handle = None; + let mut tcp_ingestor_handle = None; + let mut worker_handles; + if let Some(dispatcher) = self.nym_dispatcher { + nym_dispatcher_handle = Some(dispatcher.serve().await); + } + if let Some(ingestor) = self.nym_ingestor { + nym_ingestor_handle = Some(ingestor.serve().await); + } + if let Some(ingestor) = self.tcp_ingestor { + tcp_ingestor_handle = Some(ingestor.serve().await); + } + worker_handles = self.worker_pool.clone().serve().await; + loop { + if self.request_queue.queue_length() >= (self.request_queue.max_length() / 2) { + match self.worker_pool.push_worker().await { + Ok(handle) => { + worker_handles.push(handle); + } + Err(_e) => { + eprintln!("WorkerPool at capacity"); + } + } + } else { + let excess_workers: usize = if (self.worker_pool.workers() + - self.worker_pool.check_long_standby()) + < self.worker_pool.idle_size() + { + self.worker_pool.workers() - self.worker_pool.idle_size() + } else { + self.worker_pool.check_long_standby() + }; + for i in ((self.worker_pool.workers() - excess_workers) + ..self.worker_pool.workers()) + .rev() + { + let handle = worker_handles.remove(i); + match self.worker_pool.pop_worker(handle).await { + Ok(_) => {} + Err(e) => { + eprintln!("Failed to pop worker from pool: {}", e); + // TODO: Handle this error. + } + } + } + } + // self.check_statuses(); + // if self.check_for_shutdown().await { + // let worker_handle_options: Vec< + // Option>>, + // > = worker_handles.into_iter().map(Some).collect(); + // self.shutdown_components( + // tcp_ingestor_handle, + // nym_ingestor_handle, + // nym_dispatcher_handle, + // worker_handle_options, + // ) + // .await; + // return Ok(()); + // } + interval.tick().await; + } + }) + } + + /// Checks indexers online status and servers internal status for closure signal. + pub async fn check_for_shutdown(&self) -> bool { + if let ServerStatusType::Closing(_) = self.status { + return true; + } + if !self.check_online() { + return true; + } + return false; + } + + /// Sets the server's components to close gracefully. + async fn shutdown_components( + &mut self, + tcp_ingestor_handle: Option>>, + nym_ingestor_handle: Option>>, + nym_dispatcher_handle: Option>>, + mut worker_handles: Vec>>>, + ) { + if let Some(ingestor) = self.tcp_ingestor.as_mut() { + ingestor.shutdown().await; + if let Some(handle) = tcp_ingestor_handle { + handle.await.ok(); + } + } + if let Some(ingestor) = self.nym_ingestor.as_mut() { + ingestor.shutdown().await; + if let Some(handle) = nym_ingestor_handle { + handle.await.ok(); + } + } + self.worker_pool.shutdown(&mut worker_handles).await; + if let Some(dispatcher) = self.nym_dispatcher.as_mut() { + dispatcher.shutdown().await; + if let Some(handle) = nym_dispatcher_handle { + handle.await.ok(); + } + } + self.online + .store(false, std::sync::atomic::Ordering::SeqCst); + } + + /// Returns the status of the server and its parts, to be consumed by system printout. + pub async fn status(&self) -> ServerStatus { + todo!() + } - // Starts the server. - // pub fn Start(&self) {} + /// Checks status, handling errors. Returns ServerStatus. 
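The scaling rule in `serve` above is easy to misread: a worker is added whenever the request queue is at least half full, and otherwise workers that have been on standby too long are retired, but never below the `idle_size` floor. A small worked sketch of that arithmetic, using a hypothetical helper and counts that are not part of the patch:

// Hypothetical helper mirroring the retire branch of Server::serve.
fn workers_to_retire(workers: usize, idle_size: usize, long_standby: usize) -> usize {
    if workers - long_standby < idle_size {
        // Retiring every long-standby worker would dip below the idle floor,
        // so only retire down to `idle_size`.
        workers - idle_size
    } else {
        // Safe to retire every worker that has been idle too long.
        long_standby
    }
}

fn main() {
    // 6 workers, floor of 2, 1 long-idle: retire just that one.
    assert_eq!(workers_to_retire(6, 2, 1), 1);
    // 4 workers, floor of 2, 3 long-idle: retiring all 3 would leave only 1,
    // so retire 2 and keep the idle floor intact.
    assert_eq!(workers_to_retire(4, 2, 3), 2);
}
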
+ pub async fn check_statuses(&self) -> ServerStatus { + todo!() + } - // Returns the status of the server and its parts, to be consumed by system printout. - // pub fn Status(&self) {} + /// Check the online status on the indexer. + fn check_online(&self) -> bool { + self.online.load(Ordering::SeqCst) + } } diff --git a/zingo-rpc/src/server/dispatcher.rs b/zingo-rpc/src/server/dispatcher.rs index b072cfc..8ec6283 100644 --- a/zingo-rpc/src/server/dispatcher.rs +++ b/zingo-rpc/src/server/dispatcher.rs @@ -6,7 +6,6 @@ use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; -use tokio::sync::mpsc; use crate::{ nym::{client::NymClient, error::NymError}, @@ -15,12 +14,16 @@ use crate::{ }; /// Status of the worker. -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Clone)] pub enum DispatcherStatus { /// On hold, due to blockcache / node error. Inactive, /// Listening for new requests. Listening, + /// Running shutdown routine. + Closing, + /// Offline. + Offline, } /// Sends gRPC responses over Nym Mixnet. @@ -65,14 +68,17 @@ impl NymDispatcher { loop { tokio::select! { _ = interval.tick() => { - if !self.check_online() { - println!("Nym dispatcher shutting down."); + if self.check_for_shutdown().await { return Ok(()); } } incoming = self.response_queue.listen() => { match incoming { Ok(response) => { + // NOTE: This may need to be removed / moved for scale use. + if self.check_for_shutdown().await { + return Ok(()); + } if let Err(nym_e) = self.dispatcher .client .send_reply(response.1, response.0.clone()) @@ -95,8 +101,8 @@ impl NymDispatcher { } } Err(_e) => { - //TODO: Handle this error here (return correct error type?) eprintln!("Response queue closed, nym dispatcher shutting down."); + //TODO: Handle this error here (return correct error type?) return Ok(()); // Return Err! } } @@ -106,9 +112,20 @@ impl NymDispatcher { }) } - /// Ends the dispatcher. - pub async fn shutdown(self) { - todo!() + /// Checks indexers online status and ingestors internal status for closure signal. + pub async fn check_for_shutdown(&self) -> bool { + if let DispatcherStatus::Closing = self.status { + return true; + } + if !self.check_online() { + return true; + } + return false; + } + + /// Sets the dispatcher to close gracefully. + pub async fn shutdown(&mut self) { + self.status = DispatcherStatus::Closing } /// Returns the dispatchers current status. diff --git a/zingo-rpc/src/server/error.rs b/zingo-rpc/src/server/error.rs index e5272f8..87cb096 100644 --- a/zingo-rpc/src/server/error.rs +++ b/zingo-rpc/src/server/error.rs @@ -64,4 +64,36 @@ pub enum WorkerError { /// Tonic transport error. #[error("Tonic transport error: {0}")] TonicTransportError(#[from] tonic::transport::Error), + /// Tokio join error. + #[error("Tokio join error: {0}")] + TokioJoinError(#[from] tokio::task::JoinError), + /// Worker Pool Full. + #[error("Worker Pool Full")] + WorkerPoolFull, + /// Worker Pool at idle. + #[error("Worker Pool a idle")] + WorkerPoolIdle, +} + +/// Zingo-Proxy server errors. +#[derive(Debug, thiserror::Error)] +pub enum ServerError { + /// Request based errors. + #[error("Request error: {0}")] + RequestError(#[from] RequestError), + /// Nym based errors. + #[error("Nym error: {0}")] + NymError(#[from] NymError), + /// Ingestor based errors. + #[error("Ingestor error: {0}")] + IngestorError(#[from] IngestorError), + /// Dispatcher based errors. + #[error("Dispatcher error: {0}")] + DispatcherError(#[from] DispatcherError), + /// Worker based errors. 
+ #[error("Worker error: {0}")] + WorkerError(#[from] WorkerError), + /// Server configuration errors. + #[error("Server configuration error: {0}")] + ServerConfigError(String), } diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index 4436ef2..961d24e 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -19,12 +19,16 @@ use crate::{ }; /// Status of the worker. -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Clone)] pub enum IngestorStatus { /// On hold, due to blockcache / node error. Inactive, /// Listening for new requests. Listening, + /// Running shutdown routine. + Closing, + /// Offline. + Offline, } /// Listens for incoming gRPC requests over HTTP. @@ -56,7 +60,7 @@ impl TcpIngestor { } /// Starts Tcp service. - pub fn serve(mut self) -> tokio::task::JoinHandle> { + pub async fn serve(mut self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { // NOTE: This interval may need to be changed or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); @@ -65,22 +69,17 @@ impl TcpIngestor { loop { tokio::select! { _ = interval.tick() => { - if !self.check_online() { - println!("Tcp ingestor shutting down."); + if self.check_for_shutdown().await { return Ok(()); } } incoming = self.ingestor.accept() => { - if !self.check_online() { - println!("Tcp ingestor shutting down."); + // NOTE: This may need to be removed / moved for scale use. + if self.check_for_shutdown().await { return Ok(()); } match incoming { Ok((stream, _)) => { - if !self.check_online() { - println!("Tcp ingestor shutting down."); - return Ok(()); - } match self.queue.try_send(ZingoProxyRequest::new_from_grpc(stream)) { Ok(_) => {} Err(QueueError::QueueFull(_request)) => { @@ -96,7 +95,6 @@ impl TcpIngestor { Err(e) => { eprintln!("Failed to accept connection with client: {}", e); // TODO: Handle failed connection errors here (count errors and restart ingestor / proxy or initiate shotdown?) - continue; } } } @@ -105,9 +103,20 @@ impl TcpIngestor { }) } - /// Ends the ingestor. - pub async fn shutdown(self) { - todo!() + /// Checks indexers online status and ingestors internal status for closure signal. + pub async fn check_for_shutdown(&self) -> bool { + if let IngestorStatus::Closing = self.status { + return true; + } + if !self.check_online() { + return true; + } + return false; + } + + /// Sets the ingestor to close gracefully. + pub async fn shutdown(&mut self) { + self.status = IngestorStatus::Closing } /// Returns the ingestor current status. @@ -159,18 +168,17 @@ impl NymIngestor { loop { tokio::select! { _ = interval.tick() => { - if !self.check_online() { - println!("Nym ingestor shutting down."); - return Ok(()); + if self.check_for_shutdown().await { + return Ok(()) } } incoming = self.ingestor.client.wait_for_messages() => { + // NOTE: This may need to be removed /moved for scale use. + if self.check_for_shutdown().await { + return Ok(()) + } match incoming { Some(request) => { - if !self.check_online() { - println!("Nym ingestor shutting down."); - return Ok(()); - } // NOTE / TODO: POC server checked for empty emssages here (if request.is_empty()). Could be required here... // TODO: Handle EmptyMessageError here. let request_vu8 = request @@ -197,12 +205,8 @@ impl NymIngestor { } } None => { - // TODO: Error in nym client, handle error here (restart ingestor?). 
eprintln!("Failed to receive message from Nym network."); - if !self.online.load(Ordering::SeqCst) { - println!("Nym ingestor shutting down."); - return Ok(()); - } + // TODO: Error in nym client, handle error here (restart ingestor?). } } } @@ -211,9 +215,20 @@ impl NymIngestor { }) } - /// Ends the ingestor. - pub async fn shutdown(self) { - todo!() + /// Checks indexers online status and ingestors internal status for closure signal. + pub async fn check_for_shutdown(&self) -> bool { + if let IngestorStatus::Closing = self.status { + return true; + } + if !self.check_online() { + return true; + } + return false; + } + + /// Sets the ingestor to close gracefully. + pub async fn shutdown(&mut self) { + self.status = IngestorStatus::Closing } /// Returns the ingestor current status. diff --git a/zingo-rpc/src/server/queue.rs b/zingo-rpc/src/server/queue.rs index d615a56..641500d 100644 --- a/zingo-rpc/src/server/queue.rs +++ b/zingo-rpc/src/server/queue.rs @@ -11,6 +11,8 @@ use crate::server::error::QueueError; /// Queue with max length. #[derive(Debug, Clone)] pub struct Queue { + /// Max number of messages allowed in the queue. + max_length: usize, /// Used to track current messages in the queue. message_count: Arc, /// Queue sender. @@ -26,6 +28,7 @@ impl Queue { let message_count = Arc::new(AtomicUsize::new(0)); Queue { + max_length, message_count: message_count.clone(), queue_tx: QueueSender { inner: queue_tx, @@ -48,6 +51,11 @@ impl Queue { self.queue_rx.clone() } + /// Returns the max length of the queue. + pub fn max_length(&self) -> usize { + self.max_length + } + /// Returns the current length of the queue. pub fn queue_length(&self) -> usize { self.message_count.load(Ordering::SeqCst) @@ -132,7 +140,7 @@ impl QueueReceiver { return Ok(message); } Err(QueueError::QueueEmpty) => { - interval.tick(); + interval.tick().await; continue; } Err(e) => { diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs index 7a6060a..ab8037e 100644 --- a/zingo-rpc/src/server/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -7,15 +7,16 @@ use std::sync::{ use http::Uri; use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; -use tokio::{ - sync::mpsc, - time::{Duration, Instant}, -}; +use tokio::time::{Duration, Instant}; use tonic::transport::Server; use crate::{ rpc::GrpcClient, - server::{error::WorkerError, request::ZingoProxyRequest}, + server::{ + error::{QueueError, WorkerError}, + queue::{QueueReceiver, QueueSender}, + request::ZingoProxyRequest, + }, }; #[cfg(not(feature = "nym_poc"))] @@ -25,8 +26,8 @@ use crate::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; use zcash_client_backend::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; /// Status of the worker. -#[derive(Debug, Clone, Copy)] -pub enum StatusType { +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum WorkerStatusType { /// Running initial startup routine. Spawning, /// Processing requests from the queue. @@ -52,32 +53,32 @@ pub enum WorkerStatus { impl WorkerStatus { /// Create a new status with the current timestamp. 
- pub fn new(status: StatusType) -> WorkerStatus { + pub fn new(status: WorkerStatusType) -> WorkerStatus { match status { - StatusType::Spawning => WorkerStatus::Spawning(Instant::now()), - StatusType::Working => WorkerStatus::Working(Instant::now()), - StatusType::Standby => WorkerStatus::Standby(Instant::now()), - StatusType::Closing => WorkerStatus::Closing(Instant::now()), + WorkerStatusType::Spawning => WorkerStatus::Spawning(Instant::now()), + WorkerStatusType::Working => WorkerStatus::Working(Instant::now()), + WorkerStatusType::Standby => WorkerStatus::Standby(Instant::now()), + WorkerStatusType::Closing => WorkerStatus::Closing(Instant::now()), } } /// Return the current status type and the duration the worker has been in this status. - pub fn status(&self) -> (StatusType, Duration) { + pub fn status(&self) -> (WorkerStatusType, Duration) { match self { - WorkerStatus::Spawning(timestamp) => (StatusType::Spawning, timestamp.elapsed()), - WorkerStatus::Working(timestamp) => (StatusType::Working, timestamp.elapsed()), - WorkerStatus::Standby(timestamp) => (StatusType::Standby, timestamp.elapsed()), - WorkerStatus::Closing(timestamp) => (StatusType::Closing, timestamp.elapsed()), + WorkerStatus::Spawning(timestamp) => (WorkerStatusType::Spawning, timestamp.elapsed()), + WorkerStatus::Working(timestamp) => (WorkerStatusType::Working, timestamp.elapsed()), + WorkerStatus::Standby(timestamp) => (WorkerStatusType::Standby, timestamp.elapsed()), + WorkerStatus::Closing(timestamp) => (WorkerStatusType::Closing, timestamp.elapsed()), } } /// Update the status to a new one, resetting the timestamp. - pub fn set(&mut self, new_status: StatusType) { + pub fn set(&mut self, new_status: WorkerStatusType) { *self = match new_status { - StatusType::Spawning => WorkerStatus::Spawning(Instant::now()), - StatusType::Working => WorkerStatus::Working(Instant::now()), - StatusType::Standby => WorkerStatus::Standby(Instant::now()), - StatusType::Closing => WorkerStatus::Closing(Instant::now()), + WorkerStatusType::Spawning => WorkerStatus::Spawning(Instant::now()), + WorkerStatusType::Working => WorkerStatus::Working(Instant::now()), + WorkerStatusType::Standby => WorkerStatus::Standby(Instant::now()), + WorkerStatusType::Closing => WorkerStatus::Closing(Instant::now()), } } } @@ -86,16 +87,16 @@ impl WorkerStatus { /// /// TODO: - Add JsonRpcConnector to worker and pass to underlying RPC services. /// - Currently a new JsonRpcConnector is spawned for every new RPC serviced. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Worker { /// Worker ID. worker_id: usize, /// Used to pop requests from the queue - queue: mpsc::Receiver, + queue: QueueReceiver, /// Used to requeue requests. - _requeue: mpsc::Sender, + requeue: QueueSender, /// Used to send responses to the nym_dispatcher. - nym_response_queue: mpsc::Sender<(Vec, AnonymousSenderTag)>, + nym_response_queue: QueueSender<(Vec, AnonymousSenderTag)>, /// gRPC client used for processing requests received over http. grpc_client: GrpcClient, /// Workers current status. @@ -108,9 +109,9 @@ impl Worker { /// Creates a new queue worker. 
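    /// A rough usage sketch (the queue handles and URIs are illustrative and
    /// would normally be supplied by the WorkerPool / Server):
    ///     let worker = Worker::spawn(
    ///         0,
    ///         request_queue.rx().clone(),
    ///         request_queue.tx().clone(),
    ///         nym_response_queue.tx().clone(),
    ///         lightwalletd_uri.clone(),
    ///         zebrad_uri.clone(),
    ///         online.clone(),
    ///     )
    ///     .await;
    ///     let handle = worker.serve().await;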
pub async fn spawn( worker_id: usize, - queue: mpsc::Receiver, - _requeue: mpsc::Sender, - nym_response_queue: mpsc::Sender<(Vec, AnonymousSenderTag)>, + queue: QueueReceiver, + requeue: QueueSender, + nym_response_queue: QueueSender<(Vec, AnonymousSenderTag)>, lightwalletd_uri: Uri, zebrad_uri: Uri, online: Arc, @@ -123,10 +124,10 @@ impl Worker { Worker { worker_id, queue, - _requeue, + requeue, nym_response_queue, grpc_client, - status: WorkerStatus::new(StatusType::Spawning), + status: WorkerStatus::new(WorkerStatusType::Spawning), online, } } @@ -140,67 +141,106 @@ impl Worker { let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); let svc = CompactTxStreamerServer::new(self.grpc_client.clone()); // TODO: create tonic server here for use within loop. - self.status.set(StatusType::Standby); + self.status.set(WorkerStatusType::Standby); loop { tokio::select! { _ = interval.tick() => { - if !self.check_online() { - self.status.set(StatusType::Closing); - println!("Worker shutting down."); + if self.check_for_shutdown().await { return Ok(()); } } - incoming = self.queue.recv() => { - if !self.check_online() { - self.status.set(StatusType::Closing); - println!("worker shutting down."); - return Ok(()); - } - self.status.set(StatusType::Working); + incoming = self.queue.listen() => { match incoming { - Some(ZingoProxyRequest::TcpServerRequest(request)) => { - Server::builder().add_service(svc.clone()) - .serve_with_incoming( async_stream::stream! { - yield Ok::<_, std::io::Error>( - request.get_request().get_stream() - ); - }) - .await?; - } - Some(ZingoProxyRequest::NymServerRequest(request)) => { - match self.grpc_client - .process_nym_request(&request) - .await { - Ok(response) => { - if let Err(e) = self.nym_response_queue.send((response, request.get_request().metadata())).await { - // TODO:: Handle this error! - eprintln!("Failed to send response to nym responder: {}", e); + Ok(request) => { + // NOTE: This may need to be removed / moved for scale use (possible it should be moved to after the request is serviced?). + if self.check_for_shutdown().await { + match self.requeue.try_send(request) { + Ok(_) => { + return Ok(()); + } + Err(QueueError::QueueFull(_request)) => { + eprintln!("Request Queue Full. Failed to send response to queue.\nWorker shutting down."); + // TODO: Handle this error! (cancel shutdown?). + return Ok(()); + } + Err(e) => { + self.status.set(WorkerStatusType::Closing); + eprintln!("Request Queue Closed. Failed to send response to queue: {}\nWorker shutting down.", e); + // TODO: Handle queue closed error here. (return correct error?) + return Ok(()); } } - Err(e) => { - // TODO:: Handle this error! - eprintln!("Failed to process nym received request: {}", e); + } else { + self.status.set(WorkerStatusType::Working); + match request { + ZingoProxyRequest::TcpServerRequest(request) => { + Server::builder().add_service(svc.clone()) + .serve_with_incoming( async_stream::stream! { + yield Ok::<_, std::io::Error>( + request.get_request().get_stream() + ); + } + ) + .await?; + } + ZingoProxyRequest::NymServerRequest(request) => { + match self.grpc_client + .process_nym_request(&request) + .await { + Ok(response) => { + match self.nym_response_queue.try_send((response, request.get_request().metadata())) { + Ok(_) => {} + Err(QueueError::QueueFull(_request)) => { + eprintln!("Response Queue Full."); + // TODO: Handle this error! (open second nym responder?). 
+ } + Err(e) => { + self.status.set(WorkerStatusType::Closing); + eprintln!("Response Queue Closed. Failed to send response to queue: {}\nWorker shutting down.", e); + // TODO: Handle queue closed error here. (return correct error?) + return Ok(()); + } + } + } + Err(e) => { + eprintln!("Failed to process nym received request: {}", e); + // TODO:: Handle this error! - } + } + } + } + } + self.status.set(WorkerStatusType::Standby); } } - None => { - self.status.set(StatusType::Closing); - println!("Queue closed, worker shutting down."); + Err(_e) => { + self.status.set(WorkerStatusType::Closing); + eprintln!("Queue closed, worker shutting down."); + // TODO: Handle queue closed error here. (return correct error?) return Ok(()); } } - self.status.set(StatusType::Standby); } } } }) } - /// Ends the worker. - pub async fn shutdown(self) { - todo!() + /// Checks indexers online status and workers internal status for closure signal. + pub async fn check_for_shutdown(&self) -> bool { + if let WorkerStatus::Closing(_) = self.status { + return true; + } + if !self.check_online() { + return true; + } + return false; + } + + /// Sets the worker to close gracefully. + pub async fn shutdown(&mut self) { + self.status.set(WorkerStatusType::Closing) } /// Returns the worker's ID. @@ -209,7 +249,7 @@ impl Worker { } /// Returns the workers current status. - pub fn status(&self) -> (StatusType, Duration) { + pub fn status(&self) -> (WorkerStatusType, Duration) { self.status.status() } @@ -218,3 +258,178 @@ impl Worker { self.online.load(Ordering::SeqCst) } } + +/// Dynamically sized pool of workers. +#[derive(Debug, Clone)] +pub struct WorkerPool { + /// Maximun number of concurrent workers allowed. + max_size: usize, + /// Minimum number of workers kept running on stanby. + idle_size: usize, + /// Workers currently in the pool + workers: Vec, + /// Represents the Online status of the WorkerPool. + pub online: Arc, +} + +impl WorkerPool { + /// Creates a new worker pool containing [idle_workers] workers. + pub async fn spawn( + max_size: usize, + idle_size: usize, + queue: QueueReceiver, + _requeue: QueueSender, + nym_response_queue: QueueSender<(Vec, AnonymousSenderTag)>, + lightwalletd_uri: Uri, + zebrad_uri: Uri, + online: Arc, + ) -> Self { + let mut workers: Vec = Vec::with_capacity(max_size); + for _ in 0..idle_size { + workers.push( + Worker::spawn( + workers.len(), + queue.clone(), + _requeue.clone(), + nym_response_queue.clone(), + lightwalletd_uri.clone(), + zebrad_uri.clone(), + online.clone(), + ) + .await, + ); + } + + WorkerPool { + max_size, + idle_size, + workers, + online, + } + } + + /// Sets workers in the worker pool to start servicing the queue. + pub async fn serve(self) -> Vec>> { + let mut worker_handles = Vec::new(); + for worker in self.workers { + worker_handles.push(worker.serve().await); + } + worker_handles + } + + /// Adds a worker to the worker pool, returns error if the pool is already at max size. 
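    /// The server's main loop calls this when the request queue passes half of
    /// its max length (see Server::serve), so the pool grows under load up to
    /// max_size and excess workers are popped again once the queue drains.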
+ pub async fn push_worker( + &mut self, + ) -> Result>, WorkerError> { + if self.workers.len() >= self.max_size { + Err(WorkerError::WorkerPoolFull) + } else { + self.workers.push( + Worker::spawn( + self.workers.len(), + self.workers[0].queue.clone(), + self.workers[0].requeue.clone(), + self.workers[0].nym_response_queue.clone(), + self.workers[0].grpc_client.lightwalletd_uri.clone(), + self.workers[0].grpc_client.zebrad_uri.clone(), + self.online.clone(), + ) + .await, + ); + Ok(self.workers[self.workers.len()].clone().serve().await) + } + } + + /// Removes a worker from the worker pool, returns error if the pool is already at idle size. + pub async fn pop_worker( + &mut self, + worker_handle: tokio::task::JoinHandle>, + ) -> Result<(), WorkerError> { + if self.workers.len() <= self.idle_size { + Err(WorkerError::WorkerPoolIdle) + } else { + let worker_index = self.workers.len() - 1; + self.workers[worker_index].shutdown().await; + match worker_handle.await { + Ok(worker) => match worker { + Ok(()) => { + self.workers.pop(); + return Ok(()); + } + Err(e) => { + eprintln!("Worker returned error on shutdown: {}", e); + // TODO: Handle the inner WorkerError + return Ok(()); + } + }, + Err(e) => { + eprintln!("Worker returned error on shutdown: {}", e); + // TODO: Handle the JoinError + return Ok(()); + } + }; + } + } + + /// Returns the max size of the pool + pub fn max_size(&self) -> usize { + self.max_size + } + + /// Returns the idle size of the pool + pub fn idle_size(&self) -> usize { + self.idle_size + } + + /// Returns the current number of workers in the pool. + pub fn workers(&self) -> usize { + self.workers.len() + } + + /// Returns the statuses of all the workers in the workerpool. + pub fn statuses(&self) -> Vec<(WorkerStatusType, Duration)> { + let mut worker_statuses = Vec::new(); + for i in 0..self.workers.len() { + worker_statuses.push(self.workers[i].status()) + } + worker_statuses + } + + /// Returns the number of workers in Standby mode for 30 seconds or longer. + pub fn check_long_standby(&self) -> usize { + let statuses = self.statuses(); + statuses + .iter() + .filter(|(status, duration)| { + *status == WorkerStatusType::Standby && *duration >= Duration::from_secs(30) + }) + .count() + } + + /// Shuts down all the workers in the pool. + pub async fn shutdown( + &mut self, + worker_handles: &mut Vec>>>, + ) { + for i in (0..self.workers.len()).rev() { + self.workers[i].shutdown().await; + if let Some(worker_handle) = worker_handles[i].take() { + match worker_handle.await { + Ok(worker) => match worker { + Ok(()) => { + self.workers.pop(); + } + Err(e) => { + eprintln!("Worker returned error on shutdown: {}", e); + // TODO: Handle the inner WorkerError + } + }, + Err(e) => { + eprintln!("Worker returned error on shutdown: {}", e); + // TODO: Handle the JoinError + } + }; + } + } + } +} diff --git a/zingo-rpc/src/server/workerpool.rs b/zingo-rpc/src/server/workerpool.rs deleted file mode 100644 index ff6ce82..0000000 --- a/zingo-rpc/src/server/workerpool.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! Holds the server workerpool implementation. - -use std::sync::{atomic::AtomicBool, Arc}; - -use crate::server::worker::Worker; - -/// Dynamically sized pool of workers. -pub struct WorkerPool { - /// Maximun number of concurrent workers allowed. - max_size: usize, - /// Minimum number of workers kept running on stanby. - idle_size: usize, - /// Workers currently in the pool - workers: Vec, - /// Represents the Online status of the WorkerPool. 
- pub online: Arc, -} - -impl WorkerPool { - // /// Creates a new worker pool with idle_workers in it. - // pub async fn spawn(max_size: usize, idle_size: usize, online: Arc) -> Self { - // let workers: Vec = Vec::with_capacity(max_size); - // for i in 0..idle_size { - // workers.push( - // Worker::spawn( - // i, - // queue, - // _requeue, - // nym_response_queue, - // lightwalletd_uri, - // zebrad_uri, - // online.clone(), - // ) - // .await, - // ); - // } - - // WorkerPool { - // max_size, - // idle_size, - // workers, - // online, - // } - // } - - // Sets workers in the worker pool to start servicing the queue. - // pub fn serve(&self) -> Vec>> {} - - // Adds a worker to the worker pool, returns error if the pool is already at max size. - // pub fn add_worker(&self) -> tokio::task::JoinHandle> {} - - // Checks workers on standby, closes workers that have been on standby for longer than 30s(may need to change). - // pub fn check_workers(&self) {} -} From ad01cc4cf35842dd7b7568010a0cf41a4798ebb8 Mon Sep 17 00:00:00 2001 From: idky137 Date: Thu, 8 Aug 2024 18:13:22 +0100 Subject: [PATCH 10/18] implemented Server --- zingo-rpc/src/server.rs | 452 +++++++++++++++-------------- zingo-rpc/src/server/dispatcher.rs | 53 ++-- zingo-rpc/src/server/ingestor.rs | 74 ++--- zingo-rpc/src/server/queue.rs | 31 +- zingo-rpc/src/server/worker.rs | 156 ++++------ 5 files changed, 382 insertions(+), 384 deletions(-) diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 268b30d..c9591ac 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -5,7 +5,7 @@ use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; use std::{ net::SocketAddr, sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, }, }; @@ -18,35 +18,99 @@ pub mod request; pub mod worker; use self::{ - dispatcher::{DispatcherStatus, NymDispatcher}, + dispatcher::NymDispatcher, error::{DispatcherError, IngestorError, ServerError, WorkerError}, - ingestor::{IngestorStatus, NymIngestor, TcpIngestor}, + ingestor::{NymIngestor, TcpIngestor}, queue::Queue, request::ZingoProxyRequest, - worker::{WorkerPool, WorkerStatusType}, + worker::{WorkerPool, WorkerPoolStatus}, }; -/// -#[derive(Debug, PartialEq, Clone)] -pub struct ServerStatus { - tcp_ingestor_status: IngestorStatus, - nym_ingestor_status: IngestorStatus, - nym_dispatcher_status: DispatcherStatus, - workerpool_status: (usize, usize, Vec), - request_queue_status: (usize, usize), - nym_response_queue_status: (usize, usize), +/// Holds a thread safe reperesentation of a StatusType. +/// Possible values: +/// - [0: Spawning] +/// - [1: Listening] +/// - [2: Working] +/// - [3: Inactive] +/// - [4: Closing]. +/// - [>=5: Offline]. +/// - [>=6: Error]. +/// TODO: Define error code spec. +#[derive(Debug, Clone)] +pub struct AtomicStatus(Arc); + +impl AtomicStatus { + /// Creates a new AtomicStatus + pub fn new(status: usize) -> Self { + Self(Arc::new(AtomicUsize::new(status))) + } + + /// Loads the value held in the AtomicStatus + pub fn load(&self) -> usize { + self.0.load(Ordering::SeqCst) + } + + /// Sets the value held in the AtomicStatus + pub fn store(&self, status: usize) { + self.0.store(status, Ordering::SeqCst); + } } + /// Status of the server. #[derive(Debug, PartialEq, Clone)] -pub enum ServerStatusType { +pub enum StatusType { /// Running initial startup routine. - Spawning(ServerStatus), - /// Processing incoming requests. - Active(ServerStatus), - /// Waiting for node / blockcache to sync. 
- Hold(ServerStatus), + Spawning = 0, + /// Waiting for requests from the queue. + Listening = 1, + /// Processing requests from the queue.StatusType + Working = 2, + /// On hold, due to blockcache / node error. + Inactive = 3, /// Running shutdown routine. - Closing(ServerStatus), + Closing = 4, + /// Offline. + Offline = 5, + /// Offline. + Error = 6, +} + +impl From for StatusType { + fn from(value: usize) -> Self { + match value { + 0 => StatusType::Spawning, + 1 => StatusType::Listening, + 2 => StatusType::Working, + 3 => StatusType::Inactive, + 4 => StatusType::Closing, + 5 => StatusType::Offline, + _ => StatusType::Error, + } + } +} + +impl From for usize { + fn from(status: StatusType) -> Self { + status as usize + } +} + +impl From for StatusType { + fn from(status: AtomicStatus) -> Self { + status.load().into() + } +} + +/// Holds the status of the server and all its components. +#[derive(Debug, Clone)] +pub struct ServerStatus { + server_status: AtomicStatus, + tcp_ingestor_status: AtomicStatus, + nym_ingestor_status: AtomicStatus, + nym_dispatcher_status: AtomicStatus, + workerpool_status: WorkerPoolStatus, + request_queue_status: Arc, + nym_response_queue_status: Arc, } /// LightWallet server capable of servicing clients over both http and nym. @@ -64,160 +128,104 @@ pub struct Server { /// Nym response queue. nym_response_queue: Queue<(Vec, AnonymousSenderTag)>, /// Servers current status. - status: ServerStatusType, + status: ServerStatus, /// Represents the Online status of the Server. pub online: Arc, } impl Server { - /// Spawns a new server. + /// Spawns a new Server. pub async fn spawn( tcp_active: bool, - tcp_ingestor_listen_addr: SocketAddr, + tcp_ingestor_listen_addr: Option, nym_active: bool, - nym_conf_path: &str, + nym_conf_path: Option<&str>, lightwalletd_uri: Uri, zebrad_uri: Uri, max_queue_size: usize, max_worker_pool_size: usize, idle_worker_pool_size: usize, + status: ServerStatus, online: Arc, ) -> Result { - let ( - request_queue, - nym_response_queue, - tcp_ingestor, - nym_ingestor, - nym_dispatcher, - worker_pool, - ) = match (tcp_active, nym_active) { - (false, false) => Err(ServerError::ServerConfigError( - "Cannot start server with no ingestors selected, at least one of nym or tcp must be set to active in conf.".to_string(), - )), - (false, true) => { - let request_queue = Queue::new(max_queue_size); - let nym_response_queue = Queue::new(max_queue_size); - let nym_ingestor = Some( - NymIngestor::spawn(nym_conf_path, request_queue.tx().clone(), online.clone()) - .await?, - ); - let nym_dispatcher = Some( - NymDispatcher::spawn( - nym_conf_path, - nym_response_queue.rx().clone(), - nym_response_queue.tx().clone(), - online.clone(), - ) - .await?, - ); - let worker_pool = WorkerPool::spawn( - max_worker_pool_size, - idle_worker_pool_size, - request_queue.rx().clone(), + if !(tcp_active && nym_active) { + return Err(ServerError::ServerConfigError( + "Cannot start server with no ingestors selected, at least one of either nym or tcp must be set to active in conf.".to_string(), + )); + } + if tcp_active && tcp_ingestor_listen_addr.is_none() { + return Err(ServerError::ServerConfigError( + "TCP is active but no address provided.".to_string(), + )); + } + if nym_active && nym_conf_path.is_none() { + return Err(ServerError::ServerConfigError( + "NYM is active but no conf path provided.".to_string(), + )); + } + status.server_status.store(0); + let request_queue: Queue = + Queue::new(max_queue_size, status.request_queue_status.clone()); + 
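        // The request queue shares this AtomicUsize with ServerStatus, so length
        // changes made by the queue's senders and receivers are visible through
        // the server's status reporting without extra bookkeeping.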
status.request_queue_status.store(0, Ordering::SeqCst); + let nym_response_queue: Queue<(Vec, AnonymousSenderTag)> = + Queue::new(max_queue_size, status.nym_response_queue_status.clone()); + status.nym_response_queue_status.store(0, Ordering::SeqCst); + let tcp_ingestor = if tcp_active { + Some( + TcpIngestor::spawn( + tcp_ingestor_listen_addr.expect( + "tcp_ingestor_listen_addr returned none when used, after checks made.", + ), request_queue.tx().clone(), - nym_response_queue.tx().clone(), - lightwalletd_uri, - zebrad_uri, + status.tcp_ingestor_status.clone(), online.clone(), ) - .await; - Ok(( - request_queue, - nym_response_queue, - None, - nym_ingestor, - nym_dispatcher, - worker_pool, - )) - } - (true, false) => { - let request_queue = Queue::new(max_queue_size); - let nym_response_queue = Queue::new(max_queue_size); - let tcp_ingestor = Some( - TcpIngestor::spawn( - tcp_ingestor_listen_addr, - request_queue.tx().clone(), - online.clone(), - ) - .await?, - ); - let worker_pool = WorkerPool::spawn( - max_worker_pool_size, - idle_worker_pool_size, - request_queue.rx().clone(), + .await?, + ) + } else { + None + }; + let nym_ingestor = if nym_active { + Some( + NymIngestor::spawn( + nym_conf_path + .expect("nym_conf_path returned none when used, after checks made."), request_queue.tx().clone(), - nym_response_queue.tx().clone(), - lightwalletd_uri, - zebrad_uri, + status.nym_ingestor_status.clone(), online.clone(), ) - .await; - Ok(( - request_queue, - nym_response_queue, - tcp_ingestor, - None, - None, - worker_pool, - )) - } - (true, true) => { - let request_queue = Queue::new(max_queue_size); - let nym_response_queue = Queue::new(max_queue_size); - let tcp_ingestor = Some( - TcpIngestor::spawn( - tcp_ingestor_listen_addr, - request_queue.tx().clone(), - online.clone(), - ) - .await?, - ); - let nym_ingestor = Some( - NymIngestor::spawn(nym_conf_path, request_queue.tx().clone(), online.clone()) - .await?, - ); - let nym_dispatcher = Some( - NymDispatcher::spawn( - nym_conf_path, - nym_response_queue.rx().clone(), - nym_response_queue.tx().clone(), - online.clone(), - ) - .await?, - ); - let worker_pool = WorkerPool::spawn( - max_worker_pool_size, - idle_worker_pool_size, - request_queue.rx().clone(), - request_queue.tx().clone(), + .await?, + ) + } else { + None + }; + let nym_dispatcher = if nym_active { + Some( + NymDispatcher::spawn( + nym_conf_path + .expect("nym_conf_path returned none when used, after checks made."), + nym_response_queue.rx().clone(), nym_response_queue.tx().clone(), - lightwalletd_uri, - zebrad_uri, + status.nym_dispatcher_status.clone(), online.clone(), ) - .await; - Ok(( - request_queue, - nym_response_queue, - tcp_ingestor, - nym_ingestor, - nym_dispatcher, - worker_pool, - )) - } - }?; - let status = ServerStatusType::Spawning(ServerStatus { - tcp_ingestor_status: IngestorStatus::Inactive, - nym_ingestor_status: IngestorStatus::Inactive, - nym_dispatcher_status: DispatcherStatus::Inactive, - workerpool_status: ( - idle_worker_pool_size, - max_worker_pool_size, - vec![WorkerStatusType::Spawning; worker_pool.workers()], - ), - request_queue_status: (0, max_queue_size), - nym_response_queue_status: (0, max_queue_size), - }); + .await?, + ) + } else { + None + }; + let worker_pool = WorkerPool::spawn( + max_worker_pool_size, + idle_worker_pool_size, + request_queue.rx().clone(), + request_queue.tx().clone(), + nym_response_queue.tx().clone(), + lightwalletd_uri, + zebrad_uri, + status.workerpool_status.clone(), + online.clone(), + ) + .await; Ok(Server { 
tcp_ingestor, nym_ingestor, @@ -225,12 +233,17 @@ impl Server { worker_pool, request_queue, nym_response_queue, - status, + status: status.clone(), online, }) } - /// Starts the server. + /// Starts the gRPC service. + /// + /// Launches all components then enters command loop: + /// - Checks request queue and workerpool to spawn / despawn workers as required. + /// - Updates the ServerStatus. + /// - Checks for shutdown signal, shutting down server if received. pub async fn serve(mut self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. @@ -239,18 +252,21 @@ impl Server { let mut nym_ingestor_handle = None; let mut tcp_ingestor_handle = None; let mut worker_handles; - if let Some(dispatcher) = self.nym_dispatcher { + if let Some(dispatcher) = self.nym_dispatcher.take() { nym_dispatcher_handle = Some(dispatcher.serve().await); } - if let Some(ingestor) = self.nym_ingestor { + if let Some(ingestor) = self.nym_ingestor.take() { nym_ingestor_handle = Some(ingestor.serve().await); } - if let Some(ingestor) = self.tcp_ingestor { + if let Some(ingestor) = self.tcp_ingestor.take() { tcp_ingestor_handle = Some(ingestor.serve().await); } worker_handles = self.worker_pool.clone().serve().await; + self.status.server_status.store(1); loop { - if self.request_queue.queue_length() >= (self.request_queue.max_length() / 2) { + if self.request_queue.queue_length() >= (self.request_queue.max_length() / 2) + && (self.worker_pool.workers() < self.worker_pool.max_size()) + { match self.worker_pool.push_worker().await { Ok(handle) => { worker_handles.push(handle); @@ -259,43 +275,35 @@ impl Server { eprintln!("WorkerPool at capacity"); } } - } else { - let excess_workers: usize = if (self.worker_pool.workers() - - self.worker_pool.check_long_standby()) - < self.worker_pool.idle_size() - { - self.worker_pool.workers() - self.worker_pool.idle_size() - } else { - self.worker_pool.check_long_standby() - }; - for i in ((self.worker_pool.workers() - excess_workers) - ..self.worker_pool.workers()) - .rev() - { - let handle = worker_handles.remove(i); - match self.worker_pool.pop_worker(handle).await { - Ok(_) => {} - Err(e) => { - eprintln!("Failed to pop worker from pool: {}", e); - // TODO: Handle this error. - } + } else if (self.request_queue.queue_length() <= 1) + && (self.worker_pool.workers() > self.worker_pool.idle_size()) + { + let worker_index = self.worker_pool.workers() - 1; + let worker_handle = worker_handles.remove(worker_index); + match self.worker_pool.pop_worker(worker_handle).await { + Ok(_) => {} + Err(e) => { + eprintln!("Failed to pop worker from pool: {}", e); + // TODO: Handle this error. } } } - // self.check_statuses(); - // if self.check_for_shutdown().await { - // let worker_handle_options: Vec< - // Option>>, - // > = worker_handles.into_iter().map(Some).collect(); - // self.shutdown_components( - // tcp_ingestor_handle, - // nym_ingestor_handle, - // nym_dispatcher_handle, - // worker_handle_options, - // ) - // .await; - // return Ok(()); - // } + self.statuses(); + // TODO: Implement check_statuses() and run here. 
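                // On a shutdown signal (status Closing or above, or the online flag
                // cleared), the block below collects the component handles and calls
                // shutdown_components(), which closes the TCP ingestor, Nym ingestor,
                // worker pool and Nym dispatcher in that order before the server
                // status is set to Offline.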
+ if self.check_for_shutdown().await { + let worker_handle_options: Vec< + Option>>, + > = worker_handles.into_iter().map(Some).collect(); + self.shutdown_components( + tcp_ingestor_handle, + nym_ingestor_handle, + nym_dispatcher_handle, + worker_handle_options, + ) + .await; + self.status.server_status.store(5); + return Ok(()); + } interval.tick().await; } }) @@ -303,13 +311,18 @@ impl Server { /// Checks indexers online status and servers internal status for closure signal. pub async fn check_for_shutdown(&self) -> bool { - if let ServerStatusType::Closing(_) = self.status { + if self.status() >= 4 { return true; } if !self.check_online() { return true; } - return false; + false + } + + /// Sets the servers to close gracefully. + pub async fn shutdown(&mut self) { + self.status.server_status.store(4) } /// Sets the server's components to close gracefully. @@ -320,36 +333,51 @@ impl Server { nym_dispatcher_handle: Option>>, mut worker_handles: Vec>>>, ) { - if let Some(ingestor) = self.tcp_ingestor.as_mut() { - ingestor.shutdown().await; - if let Some(handle) = tcp_ingestor_handle { - handle.await.ok(); - } + if let Some(handle) = tcp_ingestor_handle { + self.status.tcp_ingestor_status.store(4); + handle.await.ok(); } - if let Some(ingestor) = self.nym_ingestor.as_mut() { - ingestor.shutdown().await; - if let Some(handle) = nym_ingestor_handle { - handle.await.ok(); - } + if let Some(handle) = nym_ingestor_handle { + self.status.nym_ingestor_status.store(4); + handle.await.ok(); } self.worker_pool.shutdown(&mut worker_handles).await; - if let Some(dispatcher) = self.nym_dispatcher.as_mut() { - dispatcher.shutdown().await; - if let Some(handle) = nym_dispatcher_handle { - handle.await.ok(); - } + if let Some(handle) = nym_dispatcher_handle { + self.status.nym_dispatcher_status.store(4); + handle.await.ok(); } self.online .store(false, std::sync::atomic::Ordering::SeqCst); } - /// Returns the status of the server and its parts, to be consumed by system printout. - pub async fn status(&self) -> ServerStatus { - todo!() + /// Returns the servers current status usize. + pub fn status(&self) -> usize { + self.status.server_status.load() + } + + /// Returns the servers current statustype. + pub fn statustype(&self) -> StatusType { + StatusType::from(self.status()) + } + + /// Updates and returns the status of the server and its parts. + pub fn statuses(&mut self) -> ServerStatus { + self.status.server_status.load(); + self.status.tcp_ingestor_status.load(); + self.status.nym_ingestor_status.load(); + self.status.nym_dispatcher_status.load(); + self.status + .request_queue_status + .store(self.request_queue.queue_length(), Ordering::SeqCst); + self.status + .nym_response_queue_status + .store(self.nym_response_queue.queue_length(), Ordering::SeqCst); + self.worker_pool.status(); + self.status.clone() } - /// Checks status, handling errors. Returns ServerStatus. - pub async fn check_statuses(&self) -> ServerStatus { + /// Checks statuses, handling errors. 
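    /// Not yet implemented. A possible shape (an assumption, not part of this
    /// patch): inspect each component's AtomicStatus and react to any component
    /// reporting Error, for example by restarting it or calling shutdown().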
+ pub async fn check_statuses(&mut self) { todo!() } diff --git a/zingo-rpc/src/server/dispatcher.rs b/zingo-rpc/src/server/dispatcher.rs index 8ec6283..09c37ef 100644 --- a/zingo-rpc/src/server/dispatcher.rs +++ b/zingo-rpc/src/server/dispatcher.rs @@ -9,23 +9,13 @@ use std::sync::{ use crate::{ nym::{client::NymClient, error::NymError}, - server::error::{DispatcherError, QueueError}, - server::queue::{QueueReceiver, QueueSender}, + server::{ + error::{DispatcherError, QueueError}, + queue::{QueueReceiver, QueueSender}, + AtomicStatus, StatusType, + }, }; -/// Status of the worker. -#[derive(Debug, PartialEq, Clone)] -pub enum DispatcherStatus { - /// On hold, due to blockcache / node error. - Inactive, - /// Listening for new requests. - Listening, - /// Running shutdown routine. - Closing, - /// Offline. - Offline, -} - /// Sends gRPC responses over Nym Mixnet. pub struct NymDispatcher { /// Nym Client @@ -34,10 +24,10 @@ pub struct NymDispatcher { response_queue: QueueReceiver<(Vec, AnonymousSenderTag)>, /// Used to send requests to the queue. response_requeue: QueueSender<(Vec, AnonymousSenderTag)>, + /// Current status of the ingestor. + status: AtomicStatus, /// Represents the Online status of the gRPC server. online: Arc, - /// Current status of the ingestor. - status: DispatcherStatus, } impl NymDispatcher { @@ -46,15 +36,17 @@ impl NymDispatcher { nym_conf_path: &str, response_queue: QueueReceiver<(Vec, AnonymousSenderTag)>, response_requeue: QueueSender<(Vec, AnonymousSenderTag)>, + status: AtomicStatus, online: Arc, ) -> Result { + status.store(0); let client = NymClient::spawn(&format!("{}/dispatcher", nym_conf_path)).await?; Ok(NymDispatcher { dispatcher: client, response_queue, response_requeue, online, - status: DispatcherStatus::Inactive, + status, }) } @@ -63,12 +55,13 @@ impl NymDispatcher { tokio::task::spawn(async move { // NOTE: This interval may need to be changed or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - // TODO Check self.status and wait on server / node if on hold. - self.status = DispatcherStatus::Listening; + // TODO Check blockcache sync status and wait on server / node if on hold. + self.status.store(1); loop { tokio::select! { _ = interval.tick() => { if self.check_for_shutdown().await { + self.status.store(5); return Ok(()); } } @@ -77,6 +70,7 @@ impl NymDispatcher { Ok(response) => { // NOTE: This may need to be removed / moved for scale use. if self.check_for_shutdown().await { + self.status.store(5); return Ok(()); } if let Err(nym_e) = self.dispatcher @@ -95,6 +89,7 @@ impl NymDispatcher { Err(_e) => { eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response due to the queue being closed.\nFatal error! Nym dispatcher shutting down..", nym_e); // TODO: Handle queue closed error here. (return correct error type?) + self.status.store(6); return Ok(()); //Return Err! } } @@ -103,6 +98,7 @@ impl NymDispatcher { Err(_e) => { eprintln!("Response queue closed, nym dispatcher shutting down."); //TODO: Handle this error here (return correct error type?) + self.status.store(6); return Ok(()); // Return Err! } } @@ -114,23 +110,28 @@ impl NymDispatcher { /// Checks indexers online status and ingestors internal status for closure signal. 
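    /// Status values follow the shared StatusType convention, so any value at or
    /// above Closing (4), i.e. Closing, Offline or Error, is treated as a
    /// closure signal.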
pub async fn check_for_shutdown(&self) -> bool { - if let DispatcherStatus::Closing = self.status { + if self.status() >= 4 { return true; } if !self.check_online() { return true; } - return false; + false } /// Sets the dispatcher to close gracefully. pub async fn shutdown(&mut self) { - self.status = DispatcherStatus::Closing + self.status.store(4) + } + + /// Returns the dispatchers current status usize. + pub fn status(&self) -> usize { + self.status.load() } - /// Returns the dispatchers current status. - pub fn status(&self) -> DispatcherStatus { - self.status.clone() + /// Returns the dispatchers current statustype. + pub fn statustype(&self) -> StatusType { + StatusType::from(self.status()) } fn check_online(&self) -> bool { diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index 961d24e..22bd9ca 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -15,32 +15,20 @@ use crate::{ error::{IngestorError, QueueError}, queue::QueueSender, request::ZingoProxyRequest, + AtomicStatus, StatusType, }, }; -/// Status of the worker. -#[derive(Debug, PartialEq, Clone)] -pub enum IngestorStatus { - /// On hold, due to blockcache / node error. - Inactive, - /// Listening for new requests. - Listening, - /// Running shutdown routine. - Closing, - /// Offline. - Offline, -} - /// Listens for incoming gRPC requests over HTTP. pub struct TcpIngestor { /// Tcp Listener. ingestor: TcpListener, /// Used to send requests to the queue. queue: QueueSender, + /// Current status of the ingestor. + status: AtomicStatus, /// Represents the Online status of the gRPC server. online: Arc, - /// Current status of the ingestor. - status: IngestorStatus, } impl TcpIngestor { @@ -48,14 +36,16 @@ impl TcpIngestor { pub async fn spawn( listen_addr: SocketAddr, queue: QueueSender, + status: AtomicStatus, online: Arc, ) -> Result { + status.store(0); let listener = TcpListener::bind(listen_addr).await?; Ok(TcpIngestor { ingestor: listener, queue, online, - status: IngestorStatus::Inactive, + status, }) } @@ -65,17 +55,19 @@ impl TcpIngestor { // NOTE: This interval may need to be changed or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); // TODO Check blockcache sync status and wait on server / node if on hold. - self.status = IngestorStatus::Listening; + self.status.store(1); loop { tokio::select! { _ = interval.tick() => { if self.check_for_shutdown().await { + self.status.store(5); return Ok(()); } } incoming = self.ingestor.accept() => { // NOTE: This may need to be removed / moved for scale use. if self.check_for_shutdown().await { + self.status.store(5); return Ok(()); } match incoming { @@ -105,23 +97,28 @@ impl TcpIngestor { /// Checks indexers online status and ingestors internal status for closure signal. pub async fn check_for_shutdown(&self) -> bool { - if let IngestorStatus::Closing = self.status { + if self.status() >= 4 { return true; } if !self.check_online() { return true; } - return false; + false } /// Sets the ingestor to close gracefully. pub async fn shutdown(&mut self) { - self.status = IngestorStatus::Closing + self.status.store(4) + } + + /// Returns the ingestor current status usize. + pub fn status(&self) -> usize { + self.status.load() } - /// Returns the ingestor current status. - pub fn status(&self) -> IngestorStatus { - self.status.clone() + /// Returns the ingestor current statustype. 
+ pub fn statustype(&self) -> StatusType { + StatusType::from(self.status()) } fn check_online(&self) -> bool { @@ -135,10 +132,10 @@ pub struct NymIngestor { ingestor: NymClient, /// Used to send requests to the queue. queue: QueueSender, + /// Current status of the ingestor. + status: AtomicStatus, /// Represents the Online status of the gRPC server. online: Arc, - /// Current status of the ingestor. - status: IngestorStatus, } impl NymIngestor { @@ -146,14 +143,17 @@ impl NymIngestor { pub async fn spawn( nym_conf_path: &str, queue: QueueSender, + status: AtomicStatus, online: Arc, ) -> Result { + status.store(0); + // TODO: HANDLE THESE ERRORS TO SMOOTH MIXNET CLIENT SPAWN PROCESS! let listener = NymClient::spawn(&format!("{}/ingestor", nym_conf_path)).await?; Ok(NymIngestor { ingestor: listener, queue, online, - status: IngestorStatus::Inactive, + status, }) } @@ -163,18 +163,19 @@ impl NymIngestor { // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); // TODO Check blockcache sync status and wait on server / node if on hold. - self.status = IngestorStatus::Listening; - + self.status.store(1); loop { tokio::select! { _ = interval.tick() => { if self.check_for_shutdown().await { + self.status.store(5); return Ok(()) } } incoming = self.ingestor.client.wait_for_messages() => { // NOTE: This may need to be removed /moved for scale use. if self.check_for_shutdown().await { + self.status.store(5); return Ok(()) } match incoming { @@ -217,23 +218,28 @@ impl NymIngestor { /// Checks indexers online status and ingestors internal status for closure signal. pub async fn check_for_shutdown(&self) -> bool { - if let IngestorStatus::Closing = self.status { + if self.status() >= 4 { return true; } if !self.check_online() { return true; } - return false; + false } /// Sets the ingestor to close gracefully. pub async fn shutdown(&mut self) { - self.status = IngestorStatus::Closing + self.status.store(4) + } + + /// Returns the ingestor current status usize. + pub fn status(&self) -> usize { + self.status.load() } - /// Returns the ingestor current status. - pub fn status(&self) -> IngestorStatus { - self.status.clone() + /// Returns the ingestor current statustype. + pub fn statustype(&self) -> StatusType { + StatusType::from(self.status()) } fn check_online(&self) -> bool { diff --git a/zingo-rpc/src/server/queue.rs b/zingo-rpc/src/server/queue.rs index 641500d..38b2c3f 100644 --- a/zingo-rpc/src/server/queue.rs +++ b/zingo-rpc/src/server/queue.rs @@ -14,7 +14,7 @@ pub struct Queue { /// Max number of messages allowed in the queue. max_length: usize, /// Used to track current messages in the queue. - message_count: Arc, + queue_status: Arc, /// Queue sender. queue_tx: QueueSender, /// Queue receiver. @@ -23,20 +23,19 @@ pub struct Queue { impl Queue { /// Creates a new queue with a maximum size. 
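    /// A minimal usage sketch (the request value is illustrative); the caller
    /// keeps a clone of the AtomicUsize so the queue length stays observable:
    ///     let status = Arc::new(AtomicUsize::new(0));
    ///     let queue: Queue<ZingoProxyRequest> = Queue::new(100, status.clone());
    ///     queue.tx().try_send(request)?;
    ///     assert_eq!(status.load(Ordering::SeqCst), 1);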
- pub fn new(max_length: usize) -> Self { + pub fn new(max_length: usize, queue_status: Arc) -> Self { let (queue_tx, queue_rx) = bounded(max_length); - let message_count = Arc::new(AtomicUsize::new(0)); - + queue_status.store(0, Ordering::SeqCst); Queue { max_length, - message_count: message_count.clone(), + queue_status: queue_status.clone(), queue_tx: QueueSender { inner: queue_tx, - message_count: message_count.clone(), + queue_status: queue_status.clone(), }, queue_rx: QueueReceiver { inner: queue_rx, - message_count, + queue_status, }, } } @@ -58,7 +57,7 @@ impl Queue { /// Returns the current length of the queue. pub fn queue_length(&self) -> usize { - self.message_count.load(Ordering::SeqCst) + self.queue_status.load(Ordering::SeqCst) } } @@ -68,14 +67,14 @@ pub struct QueueSender { /// Crossbeam_Channel Sender. inner: Sender, /// Used to track current messages in the queue. - message_count: Arc, + queue_status: Arc, } impl Clone for QueueSender { fn clone(&self) -> Self { Self { inner: self.inner.clone(), - message_count: Arc::clone(&self.message_count), + queue_status: Arc::clone(&self.queue_status), } } } @@ -85,7 +84,7 @@ impl QueueSender { pub fn try_send(&self, message: T) -> Result<(), QueueError> { match self.inner.try_send(message) { Ok(_) => { - self.message_count.fetch_add(1, Ordering::SeqCst); + self.queue_status.fetch_add(1, Ordering::SeqCst); Ok(()) } Err(crossbeam_channel::TrySendError::Full(t)) => Err(QueueError::QueueFull(t)), @@ -95,7 +94,7 @@ impl QueueSender { /// Returns the current length of the queue. pub fn queue_length(&self) -> usize { - self.message_count.load(Ordering::SeqCst) + self.queue_status.load(Ordering::SeqCst) } } @@ -105,14 +104,14 @@ pub struct QueueReceiver { /// Crossbeam_Channel Receiver. inner: Receiver, /// Used to track current messages in the queue. - message_count: Arc, + queue_status: Arc, } impl Clone for QueueReceiver { fn clone(&self) -> Self { Self { inner: self.inner.clone(), - message_count: Arc::clone(&self.message_count), + queue_status: Arc::clone(&self.queue_status), } } } @@ -122,7 +121,7 @@ impl QueueReceiver { pub fn try_recv(&self) -> Result> { match self.inner.try_recv() { Ok(message) => { - self.message_count.fetch_sub(1, Ordering::SeqCst); + self.queue_status.fetch_sub(1, Ordering::SeqCst); Ok(message) } Err(crossbeam_channel::TryRecvError::Empty) => Err(QueueError::QueueEmpty), @@ -152,6 +151,6 @@ impl QueueReceiver { /// Returns the current length of the queue. pub fn queue_length(&self) -> usize { - self.message_count.load(Ordering::SeqCst) + self.queue_status.load(Ordering::SeqCst) } } diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs index ab8037e..4f35c26 100644 --- a/zingo-rpc/src/server/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -1,13 +1,12 @@ //! Holds the server worker implementation. use std::sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, }; use http::Uri; use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; -use tokio::time::{Duration, Instant}; use tonic::transport::Server; use crate::{ @@ -16,6 +15,7 @@ use crate::{ error::{QueueError, WorkerError}, queue::{QueueReceiver, QueueSender}, request::ZingoProxyRequest, + AtomicStatus, }, }; @@ -25,64 +25,6 @@ use crate::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; #[cfg(feature = "nym_poc")] use zcash_client_backend::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; -/// Status of the worker. 
-#[derive(Debug, PartialEq, Clone, Copy)] -pub enum WorkerStatusType { - /// Running initial startup routine. - Spawning, - /// Processing requests from the queue. - Working, - /// Waiting for requests from the queue. - Standby, - /// Running shutdown routine. - Closing, -} - -/// Wrapper for StatusType that also holds initiation time, used for standby monitoring. -#[derive(Debug, Clone)] -pub enum WorkerStatus { - /// Running initial startup routine. - Spawning(Instant), - /// Processing requests from the queue. - Working(Instant), - /// Waiting for requests from the queue. - Standby(Instant), - /// Running shutdown routine. - Closing(Instant), -} - -impl WorkerStatus { - /// Create a new status with the current timestamp. - pub fn new(status: WorkerStatusType) -> WorkerStatus { - match status { - WorkerStatusType::Spawning => WorkerStatus::Spawning(Instant::now()), - WorkerStatusType::Working => WorkerStatus::Working(Instant::now()), - WorkerStatusType::Standby => WorkerStatus::Standby(Instant::now()), - WorkerStatusType::Closing => WorkerStatus::Closing(Instant::now()), - } - } - - /// Return the current status type and the duration the worker has been in this status. - pub fn status(&self) -> (WorkerStatusType, Duration) { - match self { - WorkerStatus::Spawning(timestamp) => (WorkerStatusType::Spawning, timestamp.elapsed()), - WorkerStatus::Working(timestamp) => (WorkerStatusType::Working, timestamp.elapsed()), - WorkerStatus::Standby(timestamp) => (WorkerStatusType::Standby, timestamp.elapsed()), - WorkerStatus::Closing(timestamp) => (WorkerStatusType::Closing, timestamp.elapsed()), - } - } - - /// Update the status to a new one, resetting the timestamp. - pub fn set(&mut self, new_status: WorkerStatusType) { - *self = match new_status { - WorkerStatusType::Spawning => WorkerStatus::Spawning(Instant::now()), - WorkerStatusType::Working => WorkerStatus::Working(Instant::now()), - WorkerStatusType::Standby => WorkerStatus::Standby(Instant::now()), - WorkerStatusType::Closing => WorkerStatus::Closing(Instant::now()), - } - } -} - /// A queue working is the entity that takes requests from the queue and processes them. /// /// TODO: - Add JsonRpcConnector to worker and pass to underlying RPC services. @@ -99,8 +41,10 @@ pub struct Worker { nym_response_queue: QueueSender<(Vec, AnonymousSenderTag)>, /// gRPC client used for processing requests received over http. grpc_client: GrpcClient, - /// Workers current status. - status: WorkerStatus, + // /// Workers current status, includes timestamp for despawning inactive workers.. + // worker_status: WorkerStatus, + /// Thread safe worker status. + atomic_status: AtomicStatus, /// Represents the Online status of the Worker. pub online: Arc, } @@ -114,6 +58,7 @@ impl Worker { nym_response_queue: QueueSender<(Vec, AnonymousSenderTag)>, lightwalletd_uri: Uri, zebrad_uri: Uri, + atomic_status: AtomicStatus, online: Arc, ) -> Self { let grpc_client = GrpcClient { @@ -127,7 +72,7 @@ impl Worker { requeue, nym_response_queue, grpc_client, - status: WorkerStatus::new(WorkerStatusType::Spawning), + atomic_status, online, } } @@ -141,7 +86,7 @@ impl Worker { let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); let svc = CompactTxStreamerServer::new(self.grpc_client.clone()); // TODO: create tonic server here for use within loop. - self.status.set(WorkerStatusType::Standby); + self.atomic_status.store(1); loop { tokio::select! 
{ _ = interval.tick() => { @@ -159,19 +104,20 @@ impl Worker { return Ok(()); } Err(QueueError::QueueFull(_request)) => { + self.atomic_status.store(5); eprintln!("Request Queue Full. Failed to send response to queue.\nWorker shutting down."); // TODO: Handle this error! (cancel shutdown?). return Ok(()); } Err(e) => { - self.status.set(WorkerStatusType::Closing); + self.atomic_status.store(5); eprintln!("Request Queue Closed. Failed to send response to queue: {}\nWorker shutting down.", e); // TODO: Handle queue closed error here. (return correct error?) return Ok(()); } } } else { - self.status.set(WorkerStatusType::Working); + self.atomic_status.store(2); match request { ZingoProxyRequest::TcpServerRequest(request) => { Server::builder().add_service(svc.clone()) @@ -195,7 +141,7 @@ impl Worker { // TODO: Handle this error! (open second nym responder?). } Err(e) => { - self.status.set(WorkerStatusType::Closing); + self.atomic_status.store(5); eprintln!("Response Queue Closed. Failed to send response to queue: {}\nWorker shutting down.", e); // TODO: Handle queue closed error here. (return correct error?) return Ok(()); @@ -211,11 +157,11 @@ impl Worker { } } } - self.status.set(WorkerStatusType::Standby); + self.atomic_status.store(1); } } Err(_e) => { - self.status.set(WorkerStatusType::Closing); + self.atomic_status.store(5); eprintln!("Queue closed, worker shutting down."); // TODO: Handle queue closed error here. (return correct error?) return Ok(()); @@ -227,20 +173,23 @@ impl Worker { }) } - /// Checks indexers online status and workers internal status for closure signal. + /// Checks for closure signals. + /// + /// Checks AtomicStatus for closure signal. + /// Checks (online) AtomicBool for fatal error signal. pub async fn check_for_shutdown(&self) -> bool { - if let WorkerStatus::Closing(_) = self.status { + if self.atomic_status() >= 4 { return true; } if !self.check_online() { return true; } - return false; + false } /// Sets the worker to close gracefully. pub async fn shutdown(&mut self) { - self.status.set(WorkerStatusType::Closing) + self.atomic_status.store(4) } /// Returns the worker's ID. @@ -248,9 +197,9 @@ impl Worker { self.worker_id } - /// Returns the workers current status. - pub fn status(&self) -> (WorkerStatusType, Duration) { - self.status.status() + /// Loads the workers current atomic status. + pub fn atomic_status(&self) -> usize { + self.atomic_status.load() } /// Check the online status on the server. @@ -259,6 +208,13 @@ impl Worker { } } +/// Holds the status of the worker pool and its workers. +#[derive(Debug, Clone)] +pub struct WorkerPoolStatus { + workers: Arc, + statuses: Vec, +} + /// Dynamically sized pool of workers. #[derive(Debug, Clone)] pub struct WorkerPool { @@ -268,6 +224,8 @@ pub struct WorkerPool { idle_size: usize, /// Workers currently in the pool workers: Vec, + /// Status of the workerpool and its workers. + status: WorkerPoolStatus, /// Represents the Online status of the WorkerPool. 
pub online: Arc, } @@ -282,6 +240,7 @@ impl WorkerPool { nym_response_queue: QueueSender<(Vec, AnonymousSenderTag)>, lightwalletd_uri: Uri, zebrad_uri: Uri, + status: WorkerPoolStatus, online: Arc, ) -> Self { let mut workers: Vec = Vec::with_capacity(max_size); @@ -294,16 +253,18 @@ impl WorkerPool { nym_response_queue.clone(), lightwalletd_uri.clone(), zebrad_uri.clone(), + status.statuses[workers.len()].clone(), online.clone(), ) .await, ); } - + status.workers.store(idle_size, Ordering::SeqCst); WorkerPool { max_size, idle_size, workers, + status, online, } } @@ -332,10 +293,12 @@ impl WorkerPool { self.workers[0].nym_response_queue.clone(), self.workers[0].grpc_client.lightwalletd_uri.clone(), self.workers[0].grpc_client.zebrad_uri.clone(), + self.status.statuses[self.workers.len()].clone(), self.online.clone(), ) .await, ); + self.status.workers.fetch_add(1, Ordering::SeqCst); Ok(self.workers[self.workers.len()].clone().serve().await) } } @@ -353,18 +316,24 @@ impl WorkerPool { match worker_handle.await { Ok(worker) => match worker { Ok(()) => { + self.status.statuses[worker_index].store(5); self.workers.pop(); + self.status.workers.fetch_sub(1, Ordering::SeqCst); return Ok(()); } Err(e) => { + self.status.statuses[worker_index].store(6); eprintln!("Worker returned error on shutdown: {}", e); - // TODO: Handle the inner WorkerError + // TODO: Handle the inner WorkerError. Return error. + self.status.workers.fetch_sub(1, Ordering::SeqCst); return Ok(()); } }, Err(e) => { + self.status.statuses[worker_index].store(6); eprintln!("Worker returned error on shutdown: {}", e); - // TODO: Handle the JoinError + // TODO: Handle the JoinError. Return error. + self.status.workers.fetch_sub(1, Ordering::SeqCst); return Ok(()); } }; @@ -386,24 +355,13 @@ impl WorkerPool { self.workers.len() } - /// Returns the statuses of all the workers in the workerpool. - pub fn statuses(&self) -> Vec<(WorkerStatusType, Duration)> { - let mut worker_statuses = Vec::new(); - for i in 0..self.workers.len() { - worker_statuses.push(self.workers[i].status()) + /// Fetches and returns the status of the workerpool and its workers. + pub fn status(&self) -> WorkerPoolStatus { + self.status.workers.load(Ordering::SeqCst); + for i in 0..self.workers() { + self.status.statuses[i].load(); } - worker_statuses - } - - /// Returns the number of workers in Standby mode for 30 seconds or longer. - pub fn check_long_standby(&self) -> usize { - let statuses = self.statuses(); - statuses - .iter() - .filter(|(status, duration)| { - *status == WorkerStatusType::Standby && *duration >= Duration::from_secs(30) - }) - .count() + self.status.clone() } /// Shuts down all the workers in the pool. 
@@ -417,16 +375,22 @@ impl WorkerPool { match worker_handle.await { Ok(worker) => match worker { Ok(()) => { + self.status.statuses[i].store(5); self.workers.pop(); + self.status.workers.fetch_sub(1, Ordering::SeqCst); } Err(e) => { + self.status.statuses[i].store(6); eprintln!("Worker returned error on shutdown: {}", e); // TODO: Handle the inner WorkerError + self.status.workers.fetch_sub(1, Ordering::SeqCst); } }, Err(e) => { + self.status.statuses[i].store(6); eprintln!("Worker returned error on shutdown: {}", e); // TODO: Handle the JoinError + self.status.workers.fetch_sub(1, Ordering::SeqCst); } }; } From 4f7b4d1e29592714e29021b81d570f5decd6bd49 Mon Sep 17 00:00:00 2001 From: idky137 Date: Thu, 8 Aug 2024 22:40:33 +0100 Subject: [PATCH 11/18] added Indexer --- Cargo.lock | 1 + zingo-proxyd/Cargo.toml | 1 + zingo-proxyd/src/config.rs | 54 +++++++++ zingo-proxyd/src/error.rs | 20 ++++ zingo-proxyd/src/indexer.rs | 176 +++++++++++++++++++++++++++++ zingo-proxyd/src/lib.rs | 3 + zingo-proxyd/src/proxy.rs | 82 +++++--------- zingo-rpc/src/jsonrpc/connector.rs | 18 +-- zingo-rpc/src/server.rs | 110 +++++++++++------- zingo-rpc/src/server/worker.rs | 39 +++++-- 10 files changed, 387 insertions(+), 117 deletions(-) create mode 100644 zingo-proxyd/src/config.rs create mode 100644 zingo-proxyd/src/error.rs create mode 100644 zingo-proxyd/src/indexer.rs diff --git a/Cargo.lock b/Cargo.lock index 5044fe0..39a1f3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7199,6 +7199,7 @@ dependencies = [ "nym-bin-common", "nym-sdk", "nym-sphinx-anonymous-replies", + "thiserror", "tokio", "tonic", "tower", diff --git a/zingo-proxyd/Cargo.toml b/zingo-proxyd/Cargo.toml index 42636e5..af53215 100644 --- a/zingo-proxyd/Cargo.toml +++ b/zingo-proxyd/Cargo.toml @@ -39,6 +39,7 @@ nym-sphinx-anonymous-replies = { workspace = true } tokio = { workspace = true, features = ["full"] } tonic = { workspace = true } http = { workspace = true } +thiserror = { workspace = true } # Miscellaneous Crate tower = { version = "0.4.13" } diff --git a/zingo-proxyd/src/config.rs b/zingo-proxyd/src/config.rs new file mode 100644 index 0000000..29a555e --- /dev/null +++ b/zingo-proxyd/src/config.rs @@ -0,0 +1,54 @@ +//! Zingo-Indexer config. + +use crate::error::IndexerError; + +/// Config information required for Zingo-Indexer. 
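/// A construction sketch (field values are illustrative; see the Default impl
/// below for the full set of defaults):
///     let config = IndexerConfig {
///         listen_port: Some(8080),
///         ..Default::default()
///     };
///     config.check_config()?;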
+#[derive(Debug, Clone)] +pub struct IndexerConfig { + pub tcp_active: bool, + pub listen_port: Option, + pub nym_active: bool, + pub nym_conf_path: Option, + pub lightwalletd_port: u16, + pub zebrad_port: u16, + pub max_queue_size: u16, + pub max_worker_pool_size: u16, + pub idle_worker_pool_size: u16, +} + +impl IndexerConfig { + pub fn check_config(&self) -> Result<(), IndexerError> { + if !(self.tcp_active && self.nym_active) { + return Err(IndexerError::ConfigError( + "Cannot start server with no ingestors selected, at least one of either nym or tcp must be set to active in conf.".to_string(), + )); + } + if self.tcp_active && self.listen_port.is_none() { + return Err(IndexerError::ConfigError( + "TCP is active but no address provided.".to_string(), + )); + } + if self.nym_active && self.nym_conf_path.is_none() { + return Err(IndexerError::ConfigError( + "NYM is active but no conf path provided.".to_string(), + )); + } + Ok(()) + } +} + +impl Default for IndexerConfig { + fn default() -> Self { + Self { + tcp_active: true, + listen_port: Some(8080), + nym_active: true, + nym_conf_path: Some("/tmp/nym_server".to_string()), + lightwalletd_port: 9067, + zebrad_port: 18232, + max_queue_size: 100, + max_worker_pool_size: 16, + idle_worker_pool_size: 2, + } + } +} diff --git a/zingo-proxyd/src/error.rs b/zingo-proxyd/src/error.rs new file mode 100644 index 0000000..b7a9ee6 --- /dev/null +++ b/zingo-proxyd/src/error.rs @@ -0,0 +1,20 @@ +//! Hold error types for the Indexer and related functionality. + +use zingo_rpc::{jsonrpc::error::JsonRpcConnectorError, server::error::ServerError}; + +/// Zingo-Proxy server errors. +#[derive(Debug, thiserror::Error)] +pub enum IndexerError { + /// Server based errors. + #[error("Server error: {0}")] + ServerError(#[from] ServerError), + /// Configuration errors. + #[error("Configuration error: {0}")] + ConfigError(String), + /// JSON RPC connector errors. + #[error("JSON RPC connector error: {0}")] + JsonRpcConnectorError(#[from] JsonRpcConnectorError), + /// HTTP related errors due to invalid URI. + #[error("HTTP error: Invalid URI {0}")] + HttpError(#[from] http::Error), +} diff --git a/zingo-proxyd/src/indexer.rs b/zingo-proxyd/src/indexer.rs new file mode 100644 index 0000000..a05c916 --- /dev/null +++ b/zingo-proxyd/src/indexer.rs @@ -0,0 +1,176 @@ +//! Zingo-Indexer implementation. + +use std::{ + net::{Ipv4Addr, SocketAddr}, + process, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +use http::Uri; +use zingo_rpc::{ + jsonrpc::connector::test_node_and_return_uri, + server::{AtomicStatus, Server, ServerStatus}, +}; + +use crate::{config::IndexerConfig, error::IndexerError}; + +/// Holds the status of the server and all its components. +#[derive(Debug, Clone)] +pub struct IndexerStatus { + indexer_status: AtomicStatus, + server_status: ServerStatus, + // block_cache_status: BlockCacheStatus, +} + +impl IndexerStatus { + /// Creates a new IndexerStatus. + pub fn new(max_workers: u16) -> Self { + IndexerStatus { + indexer_status: AtomicStatus::new(5), + server_status: ServerStatus::new(max_workers), + } + } + + /// Returns the IndexerStatus. + pub fn load(&self) -> IndexerStatus { + self.indexer_status.load(); + self.server_status.load(); + self.clone() + } +} + +/// Zingo-Indexer. +pub struct Indexer { + /// Indexer onfuguration data. + config: IndexerConfig, + /// GRPC server. + server: Server, + // Internal block cache. + // block_cache: BlockCache, + /// Indexers status. + status: IndexerStatus, + /// Online status of the indexer. 
+ online: Arc, +} + +impl Indexer { + /// Creates a new Indexer. + /// + /// Currently only takes an IndexerConfig. + async fn new(config: IndexerConfig, online: Arc) -> Result { + config.check_config()?; + let status = IndexerStatus::new(config.max_worker_pool_size); + let tcp_ingestor_listen_addr: Option = config + .listen_port + .map(|port| SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST), port)); + let lightwalletd_uri = Uri::builder() + .scheme("http") + .authority(format!("localhost:{}", config.lightwalletd_port)) + .path_and_query("/") + .build()?; + println!("Checking connection with node.."); + let zebrad_uri = test_node_and_return_uri( + &config.zebrad_port, + Some("xxxxxx".to_string()), + Some("xxxxxx".to_string()), + ) + .await?; + status.indexer_status.store(0); + let server = Server::spawn( + config.tcp_active, + tcp_ingestor_listen_addr, + config.nym_active, + config.nym_conf_path.clone(), + lightwalletd_uri, + zebrad_uri, + config.max_queue_size, + config.max_worker_pool_size, + config.idle_worker_pool_size, + status.server_status.clone(), + online.clone(), + ) + .await?; + Ok(Indexer { + config, + server, + status, + online, + }) + } + + /// Start an Indexer service. + /// + /// Currently only takes an IndexerConfig. + pub async fn start(config: IndexerConfig) -> Result<(), IndexerError> { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + + let online = Arc::new(AtomicBool::new(true)); + set_ctrlc(online.clone()); + nym_bin_common::logging::setup_logging(); + + startup_message(); + + println!("Launching Zingdexer!"); + let indexer: Indexer = Indexer::new(config, online.clone()).await?; + let server_handle = indexer.server.serve().await; + + indexer.status.indexer_status.store(2); + while online.load(Ordering::SeqCst) { + indexer.status.load(); + //printout statuses + //check for shutdown + interval.tick().await; + } + Ok(()) + } + + // /// Closes the Indexer Gracefully. + // pub async fn shutdown(&self) ( + + // ) +} + +fn set_ctrlc(online: Arc) { + ctrlc::set_handler(move || { + online.store(false, Ordering::SeqCst); + process::exit(0); + }) + .expect("Error setting Ctrl-C handler"); +} + +fn startup_message() { + let welcome_message = r#" + ░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░▒▒░░░░░ + ░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓░▒▒▒░░ + ░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒██▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒██▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███▓██▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒███▓░▒▓▓████████████████▓▓▒▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▓▓▓▓▒▓████▓▓███████████████████▓▒▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▓▓▓▓▒▒▓▓▓▓████████████████████▓▒▓▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▓▓▓▓█████████████████████████▓▒▓▓▓▓▓▒▒▒▒▒ + ▒▒▒▒▒▒▒▓▓▓▒▓█████████████████████████▓▓▓▓▓▓▓▓▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▓▓████████████████████████▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▒███████████████████████▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▓███████████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▓███████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▓██████████▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒███▓▒▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▓████▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ + Thank you for using ZingoLabs Zingdexer! + + - Donate to us at https://free2z.cash/zingolabs. 
+ - Submit any security conserns to us at zingodisclosure@proton.me. + +****** Please note Zingdexer is currently in development and should not be used to run mainnet nodes. ****** + "#; + println!("{}", welcome_message); +} diff --git a/zingo-proxyd/src/lib.rs b/zingo-proxyd/src/lib.rs index 54ae008..2b6ef41 100644 --- a/zingo-proxyd/src/lib.rs +++ b/zingo-proxyd/src/lib.rs @@ -4,6 +4,9 @@ #![warn(missing_docs)] #![forbid(unsafe_code)] +pub mod config; +pub mod error; +pub mod indexer; pub mod nym_server; pub mod proxy; pub mod server; diff --git a/zingo-proxyd/src/proxy.rs b/zingo-proxyd/src/proxy.rs index 3b8442e..43c1eca 100644 --- a/zingo-proxyd/src/proxy.rs +++ b/zingo-proxyd/src/proxy.rs @@ -140,61 +140,33 @@ async fn wait_on_grpc_startup(proxy_port: &u16, online: Arc) { fn startup_message() { let welcome_message = r#" -@@@@@@@@@@@@@@@&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&@@@@@@@@@ -@@@@@@@@@@@@@&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&@@@@@@@ -@@@@@@@@@&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&@@%(**/#@@@&&&&&&&&&&&&@@@ -@@@@@@&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&@&. /@&&&&&&&&&&&&&@ -@@@&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&&&&&&&&@@ (@&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&@@. (@&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&@@, . %@&&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&@@%@@#&@@&&&&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&@@&&&&&&&&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&%%&&&&&&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&# %&%%%%&&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&( /@%%%%&&&&&&&&&&&& -&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@&# %@@&%%%%&&&&&&&&&& -&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#####################%%%%%%%%%%%%%%%%%%%%%& %&%%%%%%%%&&&&&&&& -&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%##################################%%%%%%%%%%%%%%&& %&%%%%%%%%%%&&&&&& -&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%###########################################%%%%%%%%@&#& %&%%%%%%%%%%%&&&&& -&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%############################################%%&@@&%#(((((@ %&%%%%%%%%%%%%%&&& -&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%#############################%&@@@@@&%%##(((((((((((((((((((@ %&%%%%%%%%%%%%%%&& -&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%#%###%@@@@@&%########%&@&%#((((((((((((((((((((((((((((((((((((@ #&%%%%%%%%%%%%%%%& -&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%###%@# .&@@@@@&(((((((((((((((((((((((((((((((((((((((((((#%%%@&&&%%%%%%%%%%%%%%%%% -&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%###@& ,*@%((((((((((((((((((((((((((((((((((((((((((((((((((@%%%%%%%%%%%%%%%%%%%% -&&&%%%%%%%%%%%%%%%%%%%%%%%%%####%@/ %@(((((((((((((((((((((((((((((((((((((((((((((((((%&%%%%%%%%%%%%%%%%%%%% -&&&%%%%%%%%%%%%%%%%%%%%%%%%%#####&@. 
*@#((((((((((((((((((((((((((((((((((((((((((((((((#@%%%%%%%%%%%%%%%%%%%%% -&&%%%%%%%%%%%%%%%%%%%%%%%%#######@@@&. *@&((((((((((((((((((((((((((((((((((((((((((((((((((&%%%%%%%%%%%%%%%%%%%%%% -%&%%%%%%%%%%%%%%%%%%%%%%%%#####%@@%(((#&&&%#((((((((((((((((((((((((((((((((((((((((((((((((((((&%##%%%%%%%%%%%%%%%%%%%% -&%%%%%%%%%%%%%%%%%%%%%%%%#####%@(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((&%####%%%%%%%%%%%%%%%%%%% -&%%%%%%%%%%%%%%%%%%%%%%%%####&%(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((&%#####%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%###%@((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((@%######%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%##%@(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((&&########%%%%%%%%%%%%%%%%%%% -&%%%%%%%%%%%%%%%%%%%%%%%#%@((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((#&##########%%%%%%%%%%%%%%%%%%% -&%%%%%%%%%%%%%%%%%%%%%%%%@(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((#@############%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%@#(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((%@#############%%%%%%%%%%%%%%%%%%%% -&&%%%%%%%%%%%%%%%%%%%%%&&((((((((((((((((((((((((((((((((((((((((((((((((((((((((((&&################%%%%%%%%%%%%%%%%%%% -&&%%%%%%%%%%%%%%%%%%%%%@((((((((((((((((((((((((((((((((((((((((((((((((((((((((%@#################%%%%%%%%%%%%%%%%%%%%% -&&&%%%%%%%%%%%%%%%%%%%@%(((((((((((((((((((((((((((((((((((((((((((((((((((((%@%##################%%%%%%%%%%%%%%%%%%%%%% -&&&%%%%%%%%%%%%%%%%%%%@(((((((((((((((((((((((((((((((((((((((((((((((((((&&#####################%%%%%%%%%%%%%%%%%%%%%%% -&&&&&%%%%%%%%%%%%%%%%&%(((((((((((((((((((((((((((((((((((((((((((((((%@%######################%%%%%%%%%%%%%%%%%%%%%%%%% -&&&&&&%%%%%%%%%%%%%%%@#(((((((((((((((((((((((((((((((((((((((((((%@&#########################%%%%%%%%%%%%%%%%%%%%%%%%%% -&&&&&&%%%%%%%%%%%%%%%@(((((((((((((((((((((((((((((((((((((((#&@%###########################%%%%%%%%%%%%%%%%%%%%%%%%%%%& -&&&&&&&&%%%%%%%%%%%%&@((((((((((((((((((((((((((((((((((#&@&#############################%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&& -&&&&&&&&&%%%%%%%%%%%&%((((((((((((((((((((((((((((#&@&%###############################%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&& -&&&&&&&&&&%%%%%%%&@@@&@@@%((((((((((((((((((#&@&%%##################################%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&& -&&&&&&&&&&&&%%&@( #@#((((((((#&@&%%%%%%###############################%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&&& -&&&&&&&&&&&&&&@. *@%&@@&%%%%%%%%%%%%%%%%%%%%%%%#%#%####%%##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&&&&& -&&&&&&&&&&&&&&@ .@&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&&&&&&& -&&&&&&&&&&&&&&@& .@@%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&@@#, ,#@@%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&&&&&&&&&&&& -&&&&&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&&&&&&&&&&&&&& -Thank you for using ZingoLabs ZingoProxyD! -- Donate to us at https://free2z.cash/zingolabs. -- Submit any security conserns to us at zingodisclosure@proton.me. - -****** Please note ZingoProxyD is currently in development and should not be used to run mainnet nodes. ****** - -****** Currently LightwalletD is required for full functionality. 
****** + ░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░▒▒░░░░░ + ░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓░▒▒▒░░ + ░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒██▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒██▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███▓██▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒███▓░▒▓▓████████████████▓▓▒▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▓▓▓▓▒▓████▓▓███████████████████▓▒▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▓▓▓▓▒▒▓▓▓▓████████████████████▓▒▓▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▓▓▓▓█████████████████████████▓▒▓▓▓▓▓▒▒▒▒▒ + ▒▒▒▒▒▒▒▓▓▓▒▓█████████████████████████▓▓▓▓▓▓▓▓▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▓▓████████████████████████▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▓▒███████████████████████▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▓███████████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▓███████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒▒▒▓██████████▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒███▓▒▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▓████▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ + ▒▒▒▒▒▒▒░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ + Thank you for using ZingoLabs Zingdexer! + + - Donate to us at https://free2z.cash/zingolabs. + - Submit any security conserns to us at zingodisclosure@proton.me. + +****** Please note Zingdexer is currently in development and should not be used to run mainnet nodes. ****** "#; println!("{}", welcome_message); } diff --git a/zingo-rpc/src/jsonrpc/connector.rs b/zingo-rpc/src/jsonrpc/connector.rs index 8690104..95475d9 100644 --- a/zingo-rpc/src/jsonrpc/connector.rs +++ b/zingo-rpc/src/jsonrpc/connector.rs @@ -413,27 +413,21 @@ pub async fn test_node_and_return_uri( .map_err(JsonRpcConnectorError::InvalidUriError)?; let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); for _ in 0..3 { - println!("@zingoproxyd: Trying connection on IPv4."); + println!("Trying connection on IPv4.."); match test_node_connection(ipv4_uri.clone(), user.clone(), password.clone()).await { Ok(_) => { - println!( - "@zingoproxyd: Connected to node using IPv4 at address {}.", - ipv4_uri - ); + println!("Connected to node using IPv4 at address {}.", ipv4_uri); return Ok(ipv4_uri); } Err(e_ipv4) => { - eprintln!("@zingoproxyd: Failed to connect to node using IPv4 with error: {}\n@zingoproxyd: Trying connection on IPv6.", e_ipv4); + eprintln!("Failed to connect to node using IPv4 with error: {}\nTrying connection on IPv6..", e_ipv4); match test_node_connection(ipv6_uri.clone(), user.clone(), password.clone()).await { Ok(_) => { - println!( - "@zingoproxyd: Connected to node using IPv6 at address {}.", - ipv6_uri - ); + println!("Connected to node using IPv6 at address {}.", ipv6_uri); return Ok(ipv6_uri); } Err(e_ipv6) => { - eprintln!("@zingoproxyd: Failed to connect to node using IPv6 with error: {}.\n@zingoproxyd: Connection not established. Retrying..", e_ipv6); + eprintln!("Failed to connect to node using IPv6 with error: {}.\nConnection not established. Retrying..", e_ipv6); tokio::time::sleep(std::time::Duration::from_secs(3)).await; } } @@ -441,6 +435,6 @@ pub async fn test_node_and_return_uri( } interval.tick().await; } - eprintln!("@zingoproxyd: Could not establish connection with node. \n@zingoproxyd: Please check config and confirm node is listening at the correct address and the correct authorisation details have been entered. \n@zingoproxyd: Exiting.."); + eprintln!("Could not establish connection with node. 
\nPlease check config and confirm node is listening at the correct address and the correct authorisation details have been entered. \nExiting.."); std::process::exit(1); } diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index c9591ac..02da42d 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -41,8 +41,8 @@ pub struct AtomicStatus(Arc); impl AtomicStatus { /// Creates a new AtomicStatus - pub fn new(status: usize) -> Self { - Self(Arc::new(AtomicUsize::new(status))) + pub fn new(status: u16) -> Self { + Self(Arc::new(AtomicUsize::new(status as usize))) } /// Loads the value held in the AtomicStatus @@ -113,6 +113,33 @@ pub struct ServerStatus { nym_response_queue_status: Arc, } +impl ServerStatus { + /// Creates a ServerStatus. + pub fn new(max_workers: u16) -> Self { + ServerStatus { + server_status: AtomicStatus::new(5), + tcp_ingestor_status: AtomicStatus::new(5), + nym_ingestor_status: AtomicStatus::new(5), + nym_dispatcher_status: AtomicStatus::new(5), + workerpool_status: WorkerPoolStatus::new(max_workers), + request_queue_status: Arc::new(AtomicUsize::new(0)), + nym_response_queue_status: Arc::new(AtomicUsize::new(0)), + } + } + + /// Returns the ServerStatus. + pub fn load(&self) -> ServerStatus { + self.server_status.load(); + self.tcp_ingestor_status.load(); + self.nym_ingestor_status.load(); + self.nym_dispatcher_status.load(); + self.workerpool_status.load(); + self.request_queue_status.load(Ordering::SeqCst); + self.nym_response_queue_status.load(Ordering::SeqCst); + self.clone() + } +} + /// LightWallet server capable of servicing clients over both http and nym. pub struct Server { /// Listens for incoming gRPC requests over HTTP. @@ -139,12 +166,12 @@ impl Server { tcp_active: bool, tcp_ingestor_listen_addr: Option, nym_active: bool, - nym_conf_path: Option<&str>, + nym_conf_path: Option, lightwalletd_uri: Uri, zebrad_uri: Uri, - max_queue_size: usize, - max_worker_pool_size: usize, - idle_worker_pool_size: usize, + max_queue_size: u16, + max_worker_pool_size: u16, + idle_worker_pool_size: u16, status: ServerStatus, online: Arc, ) -> Result { @@ -163,19 +190,22 @@ impl Server { "NYM is active but no conf path provided.".to_string(), )); } + println!("Launching Server!\n"); status.server_status.store(0); let request_queue: Queue = - Queue::new(max_queue_size, status.request_queue_status.clone()); + Queue::new(max_queue_size as usize, status.request_queue_status.clone()); status.request_queue_status.store(0, Ordering::SeqCst); - let nym_response_queue: Queue<(Vec, AnonymousSenderTag)> = - Queue::new(max_queue_size, status.nym_response_queue_status.clone()); + let nym_response_queue: Queue<(Vec, AnonymousSenderTag)> = Queue::new( + max_queue_size as usize, + status.nym_response_queue_status.clone(), + ); status.nym_response_queue_status.store(0, Ordering::SeqCst); let tcp_ingestor = if tcp_active { + println!("Launching TcpIngestor.."); Some( TcpIngestor::spawn( - tcp_ingestor_listen_addr.expect( - "tcp_ingestor_listen_addr returned none when used, after checks made.", - ), + tcp_ingestor_listen_addr + .expect("tcp_ingestor_listen_addr returned none when used."), request_queue.tx().clone(), status.tcp_ingestor_status.clone(), online.clone(), @@ -185,35 +215,35 @@ impl Server { } else { None }; - let nym_ingestor = if nym_active { - Some( - NymIngestor::spawn( - nym_conf_path - .expect("nym_conf_path returned none when used, after checks made."), - request_queue.tx().clone(), - status.nym_ingestor_status.clone(), - online.clone(), - ) - .await?, 
- ) - } else { - None - }; - let nym_dispatcher = if nym_active { - Some( - NymDispatcher::spawn( - nym_conf_path - .expect("nym_conf_path returned none when used, after checks made."), - nym_response_queue.rx().clone(), - nym_response_queue.tx().clone(), - status.nym_dispatcher_status.clone(), - online.clone(), - ) - .await?, + let (nym_ingestor, nym_dispatcher) = if nym_active { + println!("Launching NymIngestor and Nymdispatcher.."); + let nym_conf_path_string = + nym_conf_path.expect("nym_conf_path returned none when used."); + ( + Some( + NymIngestor::spawn( + nym_conf_path_string.clone().as_str(), + request_queue.tx().clone(), + status.nym_ingestor_status.clone(), + online.clone(), + ) + .await?, + ), + Some( + NymDispatcher::spawn( + nym_conf_path_string.as_str(), + nym_response_queue.rx().clone(), + nym_response_queue.tx().clone(), + status.nym_dispatcher_status.clone(), + online.clone(), + ) + .await?, + ), ) } else { - None + (None, None) }; + println!("Launching WorkerPool.."); let worker_pool = WorkerPool::spawn( max_worker_pool_size, idle_worker_pool_size, @@ -265,7 +295,7 @@ impl Server { self.status.server_status.store(1); loop { if self.request_queue.queue_length() >= (self.request_queue.max_length() / 2) - && (self.worker_pool.workers() < self.worker_pool.max_size()) + && (self.worker_pool.workers() < self.worker_pool.max_size() as usize) { match self.worker_pool.push_worker().await { Ok(handle) => { @@ -276,7 +306,7 @@ impl Server { } } } else if (self.request_queue.queue_length() <= 1) - && (self.worker_pool.workers() > self.worker_pool.idle_size()) + && (self.worker_pool.workers() > self.worker_pool.idle_size() as usize) { let worker_index = self.worker_pool.workers() - 1; let worker_handle = worker_handles.remove(worker_index); diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs index 4f35c26..8e1726c 100644 --- a/zingo-rpc/src/server/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -215,13 +215,32 @@ pub struct WorkerPoolStatus { statuses: Vec, } +impl WorkerPoolStatus { + /// Creates a WorkerPoolStatus. + pub fn new(max_workers: u16) -> Self { + WorkerPoolStatus { + workers: Arc::new(AtomicUsize::new(0)), + statuses: vec![AtomicStatus::new(5); max_workers as usize], + } + } + + /// Returns the WorkerPoolStatus. + pub fn load(&self) -> WorkerPoolStatus { + self.workers.load(Ordering::SeqCst); + for i in 0..self.statuses.len() { + self.statuses[i].load(); + } + self.clone() + } +} + /// Dynamically sized pool of workers. #[derive(Debug, Clone)] pub struct WorkerPool { /// Maximun number of concurrent workers allowed. - max_size: usize, + max_size: u16, /// Minimum number of workers kept running on stanby. - idle_size: usize, + idle_size: u16, /// Workers currently in the pool workers: Vec, /// Status of the workerpool and its workers. @@ -233,8 +252,8 @@ pub struct WorkerPool { impl WorkerPool { /// Creates a new worker pool containing [idle_workers] workers. 
pub async fn spawn( - max_size: usize, - idle_size: usize, + max_size: u16, + idle_size: u16, queue: QueueReceiver, _requeue: QueueSender, nym_response_queue: QueueSender<(Vec, AnonymousSenderTag)>, @@ -243,7 +262,7 @@ impl WorkerPool { status: WorkerPoolStatus, online: Arc, ) -> Self { - let mut workers: Vec = Vec::with_capacity(max_size); + let mut workers: Vec = Vec::with_capacity(max_size as usize); for _ in 0..idle_size { workers.push( Worker::spawn( @@ -259,7 +278,7 @@ impl WorkerPool { .await, ); } - status.workers.store(idle_size, Ordering::SeqCst); + status.workers.store(idle_size as usize, Ordering::SeqCst); WorkerPool { max_size, idle_size, @@ -282,7 +301,7 @@ impl WorkerPool { pub async fn push_worker( &mut self, ) -> Result>, WorkerError> { - if self.workers.len() >= self.max_size { + if self.workers.len() >= self.max_size as usize { Err(WorkerError::WorkerPoolFull) } else { self.workers.push( @@ -308,7 +327,7 @@ impl WorkerPool { &mut self, worker_handle: tokio::task::JoinHandle>, ) -> Result<(), WorkerError> { - if self.workers.len() <= self.idle_size { + if self.workers.len() <= self.idle_size as usize { Err(WorkerError::WorkerPoolIdle) } else { let worker_index = self.workers.len() - 1; @@ -341,12 +360,12 @@ impl WorkerPool { } /// Returns the max size of the pool - pub fn max_size(&self) -> usize { + pub fn max_size(&self) -> u16 { self.max_size } /// Returns the idle size of the pool - pub fn idle_size(&self) -> usize { + pub fn idle_size(&self) -> u16 { self.idle_size } From c451324b9001d241d9f7e3af9737134eb7320ec2 Mon Sep 17 00:00:00 2001 From: idky137 Date: Thu, 8 Aug 2024 23:34:41 +0100 Subject: [PATCH 12/18] added Indexer shutdown --- zingo-proxyd/src/error.rs | 3 + zingo-proxyd/src/indexer.rs | 141 +++++++++++++++++++++++++----------- zingo-rpc/src/server.rs | 6 +- 3 files changed, 105 insertions(+), 45 deletions(-) diff --git a/zingo-proxyd/src/error.rs b/zingo-proxyd/src/error.rs index b7a9ee6..c8fcbac 100644 --- a/zingo-proxyd/src/error.rs +++ b/zingo-proxyd/src/error.rs @@ -17,4 +17,7 @@ pub enum IndexerError { /// HTTP related errors due to invalid URI. #[error("HTTP error: Invalid URI {0}")] HttpError(#[from] http::Error), + /// Custom indexor errors. + #[error("Misc indexer error: {0}")] + MiscIndexerError(String), } diff --git a/zingo-proxyd/src/indexer.rs b/zingo-proxyd/src/indexer.rs index a05c916..d3a5849 100644 --- a/zingo-proxyd/src/indexer.rs +++ b/zingo-proxyd/src/indexer.rs @@ -1,7 +1,7 @@ //! Zingo-Indexer implementation. use std::{ - net::{Ipv4Addr, SocketAddr}, + net::SocketAddr, process, sync::{ atomic::{AtomicBool, Ordering}, @@ -12,7 +12,7 @@ use std::{ use http::Uri; use zingo_rpc::{ jsonrpc::connector::test_node_and_return_uri, - server::{AtomicStatus, Server, ServerStatus}, + server::{error::ServerError, AtomicStatus, Server, ServerStatus, StatusType}, }; use crate::{config::IndexerConfig, error::IndexerError}; @@ -44,10 +44,10 @@ impl IndexerStatus { /// Zingo-Indexer. pub struct Indexer { - /// Indexer onfuguration data. + /// Indexer configuration data. config: IndexerConfig, /// GRPC server. - server: Server, + server: Option, // Internal block cache. // block_cache: BlockCache, /// Indexers status. @@ -57,6 +57,45 @@ pub struct Indexer { } impl Indexer { + /// Start an Indexer service. + /// + /// Currently only takes an IndexerConfig. + pub async fn start(config: IndexerConfig) -> Result<(), IndexerError> { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. 
+ let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + + let online = Arc::new(AtomicBool::new(true)); + set_ctrlc(online.clone()); + nym_bin_common::logging::setup_logging(); + + startup_message(); + + println!("Launching Zingdexer!"); + let mut indexer: Indexer = Indexer::new(config, online.clone()).await?; + let status = indexer.status.clone(); + + let server_handle = if let Some(server) = indexer.server.take() { + Some(server.serve().await) + } else { + return Err(IndexerError::MiscIndexerError( + "Server Missing! Fatal Error!.".to_string(), + )); + }; + + indexer.status.indexer_status.store(2); + loop { + indexer.status.load(); + // printout(); + if indexer.check_for_shutdown() { + status.indexer_status.store(4); + indexer.shutdown_components(server_handle).await; + status.indexer_status.store(5); + return Ok(()); + } + interval.tick().await; + } + } + /// Creates a new Indexer. /// /// Currently only takes an IndexerConfig. @@ -79,20 +118,22 @@ impl Indexer { ) .await?; status.indexer_status.store(0); - let server = Server::spawn( - config.tcp_active, - tcp_ingestor_listen_addr, - config.nym_active, - config.nym_conf_path.clone(), - lightwalletd_uri, - zebrad_uri, - config.max_queue_size, - config.max_worker_pool_size, - config.idle_worker_pool_size, - status.server_status.clone(), - online.clone(), - ) - .await?; + let server = Some( + Server::spawn( + config.tcp_active, + tcp_ingestor_listen_addr, + config.nym_active, + config.nym_conf_path.clone(), + lightwalletd_uri, + zebrad_uri, + config.max_queue_size, + config.max_worker_pool_size, + config.idle_worker_pool_size, + status.server_status.clone(), + online.clone(), + ) + .await?, + ); Ok(Indexer { config, server, @@ -101,37 +142,53 @@ impl Indexer { }) } - /// Start an Indexer service. - /// - /// Currently only takes an IndexerConfig. - pub async fn start(config: IndexerConfig) -> Result<(), IndexerError> { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + /// Checks indexers online status and servers internal status for closure signal. + pub fn check_for_shutdown(&self) -> bool { + if self.status() >= 4 { + return true; + } + if !self.check_online() { + return true; + } + false + } - let online = Arc::new(AtomicBool::new(true)); - set_ctrlc(online.clone()); - nym_bin_common::logging::setup_logging(); + /// Sets the servers to close gracefully. + pub fn shutdown(&mut self) { + self.status.indexer_status.store(4) + } - startup_message(); + /// Sets the server's components to close gracefully. + async fn shutdown_components( + &mut self, + server_handle: Option>>, + ) { + if let Some(handle) = server_handle { + self.status.server_status.server_status.store(4); + handle.await.ok(); + } + } - println!("Launching Zingdexer!"); - let indexer: Indexer = Indexer::new(config, online.clone()).await?; - let server_handle = indexer.server.serve().await; + /// Returns the indexers current status usize. + pub fn status(&self) -> usize { + self.status.indexer_status.load() + } - indexer.status.indexer_status.store(2); - while online.load(Ordering::SeqCst) { - indexer.status.load(); - //printout statuses - //check for shutdown - interval.tick().await; - } - Ok(()) + /// Returns the indexers current statustype. + pub fn statustype(&self) -> StatusType { + StatusType::from(self.status()) } - // /// Closes the Indexer Gracefully. 
- // pub async fn shutdown(&self) ( + /// Returns the status of the indexer and its parts. + pub fn statuses(&mut self) -> IndexerStatus { + self.status.load(); + self.status.clone() + } - // ) + /// Check the online status on the indexer. + fn check_online(&self) -> bool { + self.online.load(Ordering::SeqCst) + } } fn set_ctrlc(online: Arc) { diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 02da42d..4831034 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -104,7 +104,8 @@ impl From for StatusType { /// Holds the status of the server and all its components. #[derive(Debug, Clone)] pub struct ServerStatus { - server_status: AtomicStatus, + /// Status of the Server. + pub server_status: AtomicStatus, tcp_ingestor_status: AtomicStatus, nym_ingestor_status: AtomicStatus, nym_dispatcher_status: AtomicStatus, @@ -321,6 +322,7 @@ impl Server { self.statuses(); // TODO: Implement check_statuses() and run here. if self.check_for_shutdown().await { + self.status.server_status.store(4); let worker_handle_options: Vec< Option>>, > = worker_handles.into_iter().map(Some).collect(); @@ -376,8 +378,6 @@ impl Server { self.status.nym_dispatcher_status.store(4); handle.await.ok(); } - self.online - .store(false, std::sync::atomic::Ordering::SeqCst); } /// Returns the servers current status usize. From 14de7bf409b90802a5f8f22b3ae4d6530033cf0f Mon Sep 17 00:00:00 2001 From: idky137 Date: Fri, 9 Aug 2024 16:46:15 +0100 Subject: [PATCH 13/18] basic usage working --- zingo-proxyd/src/bin/zingoproxyd.rs | 87 +++++++++++++++-------------- zingo-proxyd/src/config.rs | 60 +++++++++++++++++--- zingo-proxyd/src/indexer.rs | 5 +- zingo-rpc/src/jsonrpc/connector.rs | 7 +-- zingo-rpc/src/jsonrpc/error.rs | 2 +- zingo-rpc/src/nym/client.rs | 3 - zingo-rpc/src/server.rs | 12 ++++ zingo-rpc/src/server/dispatcher.rs | 2 +- zingo-rpc/src/server/ingestor.rs | 4 +- zingo-rpc/src/server/worker.rs | 2 +- 10 files changed, 121 insertions(+), 63 deletions(-) diff --git a/zingo-proxyd/src/bin/zingoproxyd.rs b/zingo-proxyd/src/bin/zingoproxyd.rs index 0de3374..85b93e3 100644 --- a/zingo-proxyd/src/bin/zingoproxyd.rs +++ b/zingo-proxyd/src/bin/zingoproxyd.rs @@ -7,48 +7,53 @@ use std::{ Arc, }, }; -use zingoproxylib::proxy::spawn_proxy; +use zingoproxylib::{config::IndexerConfig, indexer::Indexer, proxy::spawn_proxy}; + +// #[tokio::main] +// async fn main() { +// let online = Arc::new(AtomicBool::new(true)); +// let online_ctrlc = online.clone(); +// ctrlc::set_handler(move || { +// println!("@zingoproxyd: Received Ctrl+C, exiting."); +// online_ctrlc.store(false, Ordering::SeqCst); +// process::exit(0); +// }) +// .expect("Error setting Ctrl-C handler"); + +// nym_bin_common::logging::setup_logging(); + +// #[allow(unused_mut)] +// let mut proxy_port: u16 = 8080; +// #[cfg(feature = "nym_poc")] +// { +// proxy_port = 8088; +// } + +// #[allow(unused_mut)] +// let mut lwd_port: u16 = 9067; +// #[cfg(feature = "nym_poc")] +// { +// lwd_port = 8080; +// } + +// let zcashd_port: u16 = 18232; + +// let (_handles, _nym_address) = spawn_proxy( +// &proxy_port, +// &lwd_port, +// &zcashd_port, +// "/tmp/nym_server", +// online.clone(), +// ) +// .await; + +// let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); +// while online.load(Ordering::SeqCst) { +// interval.tick().await; +// } +// } #[tokio::main] async fn main() { - let online = Arc::new(AtomicBool::new(true)); - let online_ctrlc = online.clone(); - ctrlc::set_handler(move || { - println!("@zingoproxyd: Received 
Ctrl+C, exiting."); - online_ctrlc.store(false, Ordering::SeqCst); - process::exit(0); - }) - .expect("Error setting Ctrl-C handler"); - - nym_bin_common::logging::setup_logging(); - - #[allow(unused_mut)] - let mut proxy_port: u16 = 8080; - #[cfg(feature = "nym_poc")] - { - proxy_port = 8088; - } - - #[allow(unused_mut)] - let mut lwd_port: u16 = 9067; - #[cfg(feature = "nym_poc")] - { - lwd_port = 8080; - } - - let zcashd_port: u16 = 18232; - - let (_handles, _nym_address) = spawn_proxy( - &proxy_port, - &lwd_port, - &zcashd_port, - "/tmp/nym_server", - online.clone(), - ) - .await; - - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); - while online.load(Ordering::SeqCst) { - interval.tick().await; - } + Indexer::start(IndexerConfig::default()).await.unwrap(); } diff --git a/zingo-proxyd/src/config.rs b/zingo-proxyd/src/config.rs index 29a555e..7011498 100644 --- a/zingo-proxyd/src/config.rs +++ b/zingo-proxyd/src/config.rs @@ -1,22 +1,40 @@ //! Zingo-Indexer config. +use std::path::Path; + use crate::error::IndexerError; /// Config information required for Zingo-Indexer. #[derive(Debug, Clone)] pub struct IndexerConfig { + /// Sets the TcpIngestor's status. pub tcp_active: bool, + /// TcpIngestors listen port pub listen_port: Option, + /// Sets the NymIngestor's and NymDispatchers status. pub nym_active: bool, + /// Nym conf path used for micnet client conf. pub nym_conf_path: Option, + /// LightWalletD listen port [DEPRECATED]. + /// + /// Used by nym_poc and zingo-testutils. pub lightwalletd_port: u16, + /// Full node / validator listen port. pub zebrad_port: u16, + /// Maximum requests allowed in the request queue. pub max_queue_size: u16, + /// Maximum workers allowed in the worker pool pub max_worker_pool_size: u16, + /// Minimum number of workers held in the workerpool when idle. pub idle_worker_pool_size: u16, } impl IndexerConfig { + /// Performs checks on config data. + /// + /// - Checks that at least 1 of nym or tpc is active. + /// - Checks listen port is given is tcp is active. + /// - Checks nym_conf_path is given if nym is active and holds a valid utf8 string. 
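// Illustrative usage only, not part of this patch: with the defaults defined below,
// both ingestors are active, so the validation guard passes. Note that as written the
// guard rejects a config with only one ingestor enabled; it is relaxed to
// `(!tcp_active) && (!nym_active)` later in this series.
//
//     let config = IndexerConfig::default();
//     config.check_config().expect("default config passes validation");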
pub fn check_config(&self) -> Result<(), IndexerError> { if !(self.tcp_active && self.nym_active) { return Err(IndexerError::ConfigError( @@ -28,27 +46,53 @@ impl IndexerConfig { "TCP is active but no address provided.".to_string(), )); } - if self.nym_active && self.nym_conf_path.is_none() { - return Err(IndexerError::ConfigError( - "NYM is active but no conf path provided.".to_string(), - )); + if let Some(path_str) = self.nym_conf_path.clone() { + if Path::new(&path_str).to_str().is_none() { + return Err(IndexerError::ConfigError( + "Invalid nym conf path syntax or non-UTF-8 characters in path.".to_string(), + )); + } + } else { + if self.nym_active { + return Err(IndexerError::ConfigError( + "NYM is active but no conf path provided.".to_string(), + )); + } } Ok(()) } } +#[cfg(not(feature = "nym_poc"))] impl Default for IndexerConfig { fn default() -> Self { Self { tcp_active: true, listen_port: Some(8080), nym_active: true, - nym_conf_path: Some("/tmp/nym_server".to_string()), + nym_conf_path: Some("/tmp/indexer/nym".to_string()), lightwalletd_port: 9067, zebrad_port: 18232, - max_queue_size: 100, - max_worker_pool_size: 16, - idle_worker_pool_size: 2, + max_queue_size: 256, + max_worker_pool_size: 32, + idle_worker_pool_size: 4, + } + } +} + +#[cfg(feature = "nym_poc")] +impl Default for IndexerConfig { + fn default() -> Self { + Self { + tcp_active: true, + listen_port: Some(8088), + nym_active: false, + nym_conf_path: Some("/tmp/indexer/nym_poc".to_string()), + lightwalletd_port: 8080, + zebrad_port: 18232, + max_queue_size: 256, + max_worker_pool_size: 32, + idle_worker_pool_size: 4, } } } diff --git a/zingo-proxyd/src/indexer.rs b/zingo-proxyd/src/indexer.rs index d3a5849..95d0acb 100644 --- a/zingo-proxyd/src/indexer.rs +++ b/zingo-proxyd/src/indexer.rs @@ -48,7 +48,7 @@ pub struct Indexer { config: IndexerConfig, /// GRPC server. server: Option, - // Internal block cache. + // /// Internal block cache. // block_cache: BlockCache, /// Indexers status. status: IndexerStatus, @@ -85,7 +85,7 @@ impl Indexer { indexer.status.indexer_status.store(2); loop { indexer.status.load(); - // printout(); + // indexer.log_status(); if indexer.check_for_shutdown() { status.indexer_status.store(4); indexer.shutdown_components(server_handle).await; @@ -134,6 +134,7 @@ impl Indexer { ) .await?, ); + println!("Server Ready."); Ok(Indexer { config, server, diff --git a/zingo-rpc/src/jsonrpc/connector.rs b/zingo-rpc/src/jsonrpc/connector.rs index 95475d9..55a7917 100644 --- a/zingo-rpc/src/jsonrpc/connector.rs +++ b/zingo-rpc/src/jsonrpc/connector.rs @@ -413,21 +413,18 @@ pub async fn test_node_and_return_uri( .map_err(JsonRpcConnectorError::InvalidUriError)?; let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); for _ in 0..3 { - println!("Trying connection on IPv4.."); match test_node_connection(ipv4_uri.clone(), user.clone(), password.clone()).await { Ok(_) => { println!("Connected to node using IPv4 at address {}.", ipv4_uri); return Ok(ipv4_uri); } - Err(e_ipv4) => { - eprintln!("Failed to connect to node using IPv4 with error: {}\nTrying connection on IPv6..", e_ipv4); + Err(_e_ipv4) => { match test_node_connection(ipv6_uri.clone(), user.clone(), password.clone()).await { Ok(_) => { println!("Connected to node using IPv6 at address {}.", ipv6_uri); return Ok(ipv6_uri); } - Err(e_ipv6) => { - eprintln!("Failed to connect to node using IPv6 with error: {}.\nConnection not established. 
Retrying..", e_ipv6); + Err(_e_ipv6) => { tokio::time::sleep(std::time::Duration::from_secs(3)).await; } } diff --git a/zingo-rpc/src/jsonrpc/error.rs b/zingo-rpc/src/jsonrpc/error.rs index 0dd3c74..31bdfcf 100644 --- a/zingo-rpc/src/jsonrpc/error.rs +++ b/zingo-rpc/src/jsonrpc/error.rs @@ -40,7 +40,7 @@ impl JsonRpcConnectorError { /// Maps JsonRpcConnectorError to tonic::Status pub fn to_grpc_status(&self) -> tonic::Status { - eprintln!("@zingoproxyd: Error occurred: {}.", self); + eprintln!("Error occurred: {}.", self); match self { JsonRpcConnectorError::SerdeJsonError(_) => { diff --git a/zingo-rpc/src/nym/client.rs b/zingo-rpc/src/nym/client.rs index 4dae03e..c4fb3bc 100644 --- a/zingo-rpc/src/nym/client.rs +++ b/zingo-rpc/src/nym/client.rs @@ -29,10 +29,7 @@ impl NymClient { .build()? .connect_to_mixnet() .await?; - let addr = client.nym_address().to_string(); - println!("@zingoproxyd[nym]: Nym server listening on: {addr}."); - Ok(Self { client, addr }) } diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 4831034..c9ca7ae 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -54,6 +54,18 @@ impl AtomicStatus { pub fn store(&self, status: usize) { self.0.store(status, Ordering::SeqCst); } + + // fn to_display(status: &AtomicStatus) -> ColoredString { + // match StatusType::from(status.load()) { + // StatusType::Spawning => "●".yellow(), + // StatusType::Listening => "●".green(), + // StatusType::Working => "●".blue(), + // StatusType::Inactive => "●".red(), + // StatusType::Closing => "●".magenta(), + // StatusType::Offline => "●".white(), + // StatusType::Error => "●".red().bold(), + // } + // } } /// Status of the server. diff --git a/zingo-rpc/src/server/dispatcher.rs b/zingo-rpc/src/server/dispatcher.rs index 09c37ef..54f1caa 100644 --- a/zingo-rpc/src/server/dispatcher.rs +++ b/zingo-rpc/src/server/dispatcher.rs @@ -51,7 +51,7 @@ impl NymDispatcher { } /// Starts Nym service. - pub async fn serve(mut self) -> tokio::task::JoinHandle> { + pub async fn serve(self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { // NOTE: This interval may need to be changed or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index 22bd9ca..83f7874 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -41,6 +41,7 @@ impl TcpIngestor { ) -> Result { status.store(0); let listener = TcpListener::bind(listen_addr).await?; + println!("TcpIngestor listening at: {}.", listen_addr); Ok(TcpIngestor { ingestor: listener, queue, @@ -50,7 +51,7 @@ impl TcpIngestor { } /// Starts Tcp service. - pub async fn serve(mut self) -> tokio::task::JoinHandle> { + pub async fn serve(self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { // NOTE: This interval may need to be changed or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); @@ -149,6 +150,7 @@ impl NymIngestor { status.store(0); // TODO: HANDLE THESE ERRORS TO SMOOTH MIXNET CLIENT SPAWN PROCESS! 
let listener = NymClient::spawn(&format!("{}/ingestor", nym_conf_path)).await?; + println!("NymIngestor listening at: {}.", listener.addr); Ok(NymIngestor { ingestor: listener, queue, diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs index 8e1726c..b71312b 100644 --- a/zingo-rpc/src/server/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -80,7 +80,7 @@ impl Worker { /// Starts queue worker service routine. /// /// TODO: Add requeue logic for node errors. - pub async fn serve(mut self) -> tokio::task::JoinHandle> { + pub async fn serve(self) -> tokio::task::JoinHandle> { tokio::task::spawn(async move { // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); From 6f28e86656c9e4e409906f2c47264134aa52003a Mon Sep 17 00:00:00 2001 From: idky137 Date: Fri, 9 Aug 2024 18:21:03 +0100 Subject: [PATCH 14/18] nym working, moved nym_ingestor functionality into nym_dispatcher --- zingo-proxyd/src/bin/zingoproxyd.rs | 55 +--------- zingo-proxyd/src/config.rs | 13 ++- zingo-proxyd/src/indexer.rs | 4 +- zingo-rpc/src/rpc/nymwalletservice.rs | 108 ++++++++++---------- zingo-rpc/src/server.rs | 60 ++++------- zingo-rpc/src/server/dispatcher.rs | 140 -------------------------- zingo-rpc/src/server/error.rs | 11 -- zingo-rpc/src/server/ingestor.rs | 51 +++++++++- 8 files changed, 137 insertions(+), 305 deletions(-) delete mode 100644 zingo-rpc/src/server/dispatcher.rs diff --git a/zingo-proxyd/src/bin/zingoproxyd.rs b/zingo-proxyd/src/bin/zingoproxyd.rs index 85b93e3..d353b58 100644 --- a/zingo-proxyd/src/bin/zingoproxyd.rs +++ b/zingo-proxyd/src/bin/zingoproxyd.rs @@ -1,57 +1,6 @@ -//! Zingo-Proxy daemon +//! Zingo-Indexer daemon -use std::{ - process, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, -}; -use zingoproxylib::{config::IndexerConfig, indexer::Indexer, proxy::spawn_proxy}; - -// #[tokio::main] -// async fn main() { -// let online = Arc::new(AtomicBool::new(true)); -// let online_ctrlc = online.clone(); -// ctrlc::set_handler(move || { -// println!("@zingoproxyd: Received Ctrl+C, exiting."); -// online_ctrlc.store(false, Ordering::SeqCst); -// process::exit(0); -// }) -// .expect("Error setting Ctrl-C handler"); - -// nym_bin_common::logging::setup_logging(); - -// #[allow(unused_mut)] -// let mut proxy_port: u16 = 8080; -// #[cfg(feature = "nym_poc")] -// { -// proxy_port = 8088; -// } - -// #[allow(unused_mut)] -// let mut lwd_port: u16 = 9067; -// #[cfg(feature = "nym_poc")] -// { -// lwd_port = 8080; -// } - -// let zcashd_port: u16 = 18232; - -// let (_handles, _nym_address) = spawn_proxy( -// &proxy_port, -// &lwd_port, -// &zcashd_port, -// "/tmp/nym_server", -// online.clone(), -// ) -// .await; - -// let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); -// while online.load(Ordering::SeqCst) { -// interval.tick().await; -// } -// } +use zingoproxylib::{config::IndexerConfig, indexer::Indexer}; #[tokio::main] async fn main() { diff --git a/zingo-proxyd/src/config.rs b/zingo-proxyd/src/config.rs index 7011498..99c9931 100644 --- a/zingo-proxyd/src/config.rs +++ b/zingo-proxyd/src/config.rs @@ -16,11 +16,14 @@ pub struct IndexerConfig { /// Nym conf path used for micnet client conf. pub nym_conf_path: Option, /// LightWalletD listen port [DEPRECATED]. - /// /// Used by nym_poc and zingo-testutils. pub lightwalletd_port: u16, /// Full node / validator listen port. pub zebrad_port: u16, + /// Full node Username. 
+ pub node_user: Option, + /// full node Password. + pub node_password: Option, /// Maximum requests allowed in the request queue. pub max_queue_size: u16, /// Maximum workers allowed in the worker pool @@ -36,7 +39,7 @@ impl IndexerConfig { /// - Checks listen port is given is tcp is active. /// - Checks nym_conf_path is given if nym is active and holds a valid utf8 string. pub fn check_config(&self) -> Result<(), IndexerError> { - if !(self.tcp_active && self.nym_active) { + if (!self.tcp_active) && (!self.nym_active) { return Err(IndexerError::ConfigError( "Cannot start server with no ingestors selected, at least one of either nym or tcp must be set to active in conf.".to_string(), )); @@ -73,6 +76,8 @@ impl Default for IndexerConfig { nym_conf_path: Some("/tmp/indexer/nym".to_string()), lightwalletd_port: 9067, zebrad_port: 18232, + node_user: Some("xxxxxx".to_string()), + node_password: Some("xxxxxx".to_string()), max_queue_size: 256, max_worker_pool_size: 32, idle_worker_pool_size: 4, @@ -87,9 +92,11 @@ impl Default for IndexerConfig { tcp_active: true, listen_port: Some(8088), nym_active: false, - nym_conf_path: Some("/tmp/indexer/nym_poc".to_string()), + nym_conf_path: None, lightwalletd_port: 8080, zebrad_port: 18232, + node_user: Some("xxxxxx".to_string()), + node_password: Some("xxxxxx".to_string()), max_queue_size: 256, max_worker_pool_size: 32, idle_worker_pool_size: 4, diff --git a/zingo-proxyd/src/indexer.rs b/zingo-proxyd/src/indexer.rs index 95d0acb..b7785d2 100644 --- a/zingo-proxyd/src/indexer.rs +++ b/zingo-proxyd/src/indexer.rs @@ -113,8 +113,8 @@ impl Indexer { println!("Checking connection with node.."); let zebrad_uri = test_node_and_return_uri( &config.zebrad_port, - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), + config.node_user.clone(), + config.node_password.clone(), ) .await?; status.indexer_status.store(0); diff --git a/zingo-rpc/src/rpc/nymwalletservice.rs b/zingo-rpc/src/rpc/nymwalletservice.rs index a8cb53a..90a2e54 100644 --- a/zingo-rpc/src/rpc/nymwalletservice.rs +++ b/zingo-rpc/src/rpc/nymwalletservice.rs @@ -217,60 +217,60 @@ impl CompactTxStreamer for GrpcClient { ) -> tonic::Streaming ); - // define_grpc_passthrough!( - // fn get_lightd_info( - // &self, - // request: tonic::Request, - // ) -> LightdInfo - // ); - async fn get_lightd_info( - &self, - request: Request, - ) -> Result, Status> { - println!("@zingoproxyd[nym_poc]: Received call of get_lightd_info."); - // -- serialize Empty - let serialized_request = match serialize_request(&request.into_inner()).await { - Ok(data) => data, - Err(e) => { - return Err(Status::internal(format!( - "Failed to serialize request: {}", - e - ))) - } - }; - // -- create ZingoProxyRequest - let nym_request = match write_nym_request_data( - 0, - "GetLightdInfo".to_string(), - serialized_request.as_ref(), - ) { - Ok(data) => data, - Err(e) => { - return Err(Status::internal(format!( - "Failed to write nym request data: {}", - e - ))) - } - }; - // -- forward request over nym and wait for response - let args: Vec = env::args().collect(); - let recipient_address: String = args[1].clone(); - let nym_conf_path = "/tmp/nym_client"; - let mut client = NymClient::spawn(nym_conf_path).await?; - let response_data = client.send(recipient_address.as_str(), nym_request).await?; - client.close().await; - // -- deserialize LightdInfo - let response: LightdInfo = match deserialize_response(response_data.as_slice()).await { - Ok(res) => res, - Err(e) => { - return Err(Status::internal(format!( - "Failed to decode 
response: {}", - e - ))) - } - }; - Ok(Response::new(response)) - } + define_grpc_passthrough!( + fn get_lightd_info( + &self, + request: tonic::Request, + ) -> LightdInfo + ); + // async fn get_lightd_info( + // &self, + // request: Request, + // ) -> Result, Status> { + // println!("@zingoproxyd[nym_poc]: Received call of get_lightd_info."); + // // -- serialize Empty + // let serialized_request = match serialize_request(&request.into_inner()).await { + // Ok(data) => data, + // Err(e) => { + // return Err(Status::internal(format!( + // "Failed to serialize request: {}", + // e + // ))) + // } + // }; + // // -- create ZingoProxyRequest + // let nym_request = match write_nym_request_data( + // 0, + // "GetLightdInfo".to_string(), + // serialized_request.as_ref(), + // ) { + // Ok(data) => data, + // Err(e) => { + // return Err(Status::internal(format!( + // "Failed to write nym request data: {}", + // e + // ))) + // } + // }; + // // -- forward request over nym and wait for response + // let args: Vec = env::args().collect(); + // let recipient_address: String = args[1].clone(); + // let nym_conf_path = "/tmp/nym_client"; + // let mut client = NymClient::spawn(nym_conf_path).await?; + // let response_data = client.send(recipient_address.as_str(), nym_request).await?; + // client.close().await; + // // -- deserialize LightdInfo + // let response: LightdInfo = match deserialize_response(response_data.as_slice()).await { + // Ok(res) => res, + // Err(e) => { + // return Err(Status::internal(format!( + // "Failed to decode response: {}", + // e + // ))) + // } + // }; + // Ok(Response::new(response)) + // } define_grpc_passthrough!( fn ping( diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index c9ca7ae..4d5632f 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -10,7 +10,6 @@ use std::{ }, }; -pub mod dispatcher; pub mod error; pub mod ingestor; pub mod queue; @@ -18,8 +17,8 @@ pub mod request; pub mod worker; use self::{ - dispatcher::NymDispatcher, - error::{DispatcherError, IngestorError, ServerError, WorkerError}, + // dispatcher::NymDispatcher, + error::{IngestorError, ServerError, WorkerError}, ingestor::{NymIngestor, TcpIngestor}, queue::Queue, request::ZingoProxyRequest, @@ -157,10 +156,8 @@ impl ServerStatus { pub struct Server { /// Listens for incoming gRPC requests over HTTP. tcp_ingestor: Option, - /// Listens for incoming gRPC requests over Nym Mixnet. + /// Listens for incoming gRPC requests over Nym Mixnet, also sends responses back to clients. nym_ingestor: Option, - /// Sends gRPC responses over Nym Mixnet. - nym_dispatcher: Option, /// Dynamically sized pool of workers. worker_pool: WorkerPool, /// Request queue. 
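With the separate NymDispatcher gone, a single NymIngestor task now services both queues: requests flow in toward the worker pool, and (reply, sender tag) pairs flow back out to the mixnet. Below is a minimal sketch of that two-queue wiring, using tokio mpsc channels as a stand-in for the crate's bounded Queue type; the mixnet client, AnonymousSenderTag, and real request handling are elided.

// Schematic sketch only; not part of the patch.
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Stand-ins for the crate's request and nym_response queues.
    let (request_tx, mut request_rx) = mpsc::channel::<Vec<u8>>(256);
    let (response_tx, mut response_rx) = mpsc::channel::<(Vec<u8>, String)>(256);

    // Worker side: pop a request, push the reply tagged with a return address.
    tokio::spawn(async move {
        while let Some(request) = request_rx.recv().await {
            let reply = request; // placeholder for real request processing
            let _ = response_tx.send((reply, "sender_tag".to_string())).await;
        }
    });

    // Ingestor side: enqueue an incoming request, then drain the reply that the
    // real code would hand back to the mixnet client via send_reply.
    request_tx.send(b"get_lightd_info".to_vec()).await.unwrap();
    if let Some((reply, tag)) = response_rx.recv().await {
        println!("reply for {}: {} bytes", tag, reply.len());
    }
}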
@@ -188,7 +185,7 @@ impl Server { status: ServerStatus, online: Arc, ) -> Result { - if !(tcp_active && nym_active) { + if (!tcp_active) && (!nym_active) { return Err(ServerError::ServerConfigError( "Cannot start server with no ingestors selected, at least one of either nym or tcp must be set to active in conf.".to_string(), )); @@ -228,34 +225,25 @@ impl Server { } else { None }; - let (nym_ingestor, nym_dispatcher) = if nym_active { - println!("Launching NymIngestor and Nymdispatcher.."); + let nym_ingestor = if nym_active { + println!("Launching NymIngestor.."); let nym_conf_path_string = nym_conf_path.expect("nym_conf_path returned none when used."); - ( - Some( - NymIngestor::spawn( - nym_conf_path_string.clone().as_str(), - request_queue.tx().clone(), - status.nym_ingestor_status.clone(), - online.clone(), - ) - .await?, - ), - Some( - NymDispatcher::spawn( - nym_conf_path_string.as_str(), - nym_response_queue.rx().clone(), - nym_response_queue.tx().clone(), - status.nym_dispatcher_status.clone(), - online.clone(), - ) - .await?, - ), + Some( + NymIngestor::spawn( + nym_conf_path_string.clone().as_str(), + request_queue.tx().clone(), + nym_response_queue.rx().clone(), + nym_response_queue.tx().clone(), + status.nym_ingestor_status.clone(), + online.clone(), + ) + .await?, ) } else { - (None, None) + None }; + println!("Launching WorkerPool.."); let worker_pool = WorkerPool::spawn( max_worker_pool_size, @@ -272,7 +260,6 @@ impl Server { Ok(Server { tcp_ingestor, nym_ingestor, - nym_dispatcher, worker_pool, request_queue, nym_response_queue, @@ -291,13 +278,9 @@ impl Server { tokio::task::spawn(async move { // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - let mut nym_dispatcher_handle = None; let mut nym_ingestor_handle = None; let mut tcp_ingestor_handle = None; let mut worker_handles; - if let Some(dispatcher) = self.nym_dispatcher.take() { - nym_dispatcher_handle = Some(dispatcher.serve().await); - } if let Some(ingestor) = self.nym_ingestor.take() { nym_ingestor_handle = Some(ingestor.serve().await); } @@ -341,7 +324,6 @@ impl Server { self.shutdown_components( tcp_ingestor_handle, nym_ingestor_handle, - nym_dispatcher_handle, worker_handle_options, ) .await; @@ -374,7 +356,7 @@ impl Server { &mut self, tcp_ingestor_handle: Option>>, nym_ingestor_handle: Option>>, - nym_dispatcher_handle: Option>>, + // nym_dispatcher_handle: Option>>, mut worker_handles: Vec>>>, ) { if let Some(handle) = tcp_ingestor_handle { @@ -386,10 +368,6 @@ impl Server { handle.await.ok(); } self.worker_pool.shutdown(&mut worker_handles).await; - if let Some(handle) = nym_dispatcher_handle { - self.status.nym_dispatcher_status.store(4); - handle.await.ok(); - } } /// Returns the servers current status usize. diff --git a/zingo-rpc/src/server/dispatcher.rs b/zingo-rpc/src/server/dispatcher.rs deleted file mode 100644 index 54f1caa..0000000 --- a/zingo-rpc/src/server/dispatcher.rs +++ /dev/null @@ -1,140 +0,0 @@ -//! Holds the server dispatcher (replyer) implementations. - -use nym_sdk::mixnet::MixnetMessageSender; -use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; -use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, -}; - -use crate::{ - nym::{client::NymClient, error::NymError}, - server::{ - error::{DispatcherError, QueueError}, - queue::{QueueReceiver, QueueSender}, - AtomicStatus, StatusType, - }, -}; - -/// Sends gRPC responses over Nym Mixnet. 
-pub struct NymDispatcher { - /// Nym Client - dispatcher: NymClient, - /// Used to send requests to the queue. - response_queue: QueueReceiver<(Vec, AnonymousSenderTag)>, - /// Used to send requests to the queue. - response_requeue: QueueSender<(Vec, AnonymousSenderTag)>, - /// Current status of the ingestor. - status: AtomicStatus, - /// Represents the Online status of the gRPC server. - online: Arc, -} - -impl NymDispatcher { - /// Creates a Nym Ingestor - pub async fn spawn( - nym_conf_path: &str, - response_queue: QueueReceiver<(Vec, AnonymousSenderTag)>, - response_requeue: QueueSender<(Vec, AnonymousSenderTag)>, - status: AtomicStatus, - online: Arc, - ) -> Result { - status.store(0); - let client = NymClient::spawn(&format!("{}/dispatcher", nym_conf_path)).await?; - Ok(NymDispatcher { - dispatcher: client, - response_queue, - response_requeue, - online, - status, - }) - } - - /// Starts Nym service. - pub async fn serve(self) -> tokio::task::JoinHandle> { - tokio::task::spawn(async move { - // NOTE: This interval may need to be changed or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - // TODO Check blockcache sync status and wait on server / node if on hold. - self.status.store(1); - loop { - tokio::select! { - _ = interval.tick() => { - if self.check_for_shutdown().await { - self.status.store(5); - return Ok(()); - } - } - incoming = self.response_queue.listen() => { - match incoming { - Ok(response) => { - // NOTE: This may need to be removed / moved for scale use. - if self.check_for_shutdown().await { - self.status.store(5); - return Ok(()); - } - if let Err(nym_e) = self.dispatcher - .client - .send_reply(response.1, response.0.clone()) - .await.map_err(NymError::from) { - match self.response_requeue.try_send(response) { - Ok(_) => { - eprintln!("Failed to send response over nym: {}\nResponse requeued, restarting nym dispatcher.", nym_e); - // TODO: Handle error. Restart nym dispatcher. - } - Err(QueueError::QueueFull(_request)) => { - eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response due to full response queue.\nFatal error! Restarting nym dispatcher.", nym_e); - // TODO: Handle queue full error here (start up second dispatcher?). Restart nym dispatcher - } - Err(_e) => { - eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response due to the queue being closed.\nFatal error! Nym dispatcher shutting down..", nym_e); - // TODO: Handle queue closed error here. (return correct error type?) - self.status.store(6); - return Ok(()); //Return Err! - } - } - } - } - Err(_e) => { - eprintln!("Response queue closed, nym dispatcher shutting down."); - //TODO: Handle this error here (return correct error type?) - self.status.store(6); - return Ok(()); // Return Err! - } - } - } - } - } - }) - } - - /// Checks indexers online status and ingestors internal status for closure signal. - pub async fn check_for_shutdown(&self) -> bool { - if self.status() >= 4 { - return true; - } - if !self.check_online() { - return true; - } - false - } - - /// Sets the dispatcher to close gracefully. - pub async fn shutdown(&mut self) { - self.status.store(4) - } - - /// Returns the dispatchers current status usize. - pub fn status(&self) -> usize { - self.status.load() - } - - /// Returns the dispatchers current statustype. 
- pub fn statustype(&self) -> StatusType { - StatusType::from(self.status()) - } - - fn check_online(&self) -> bool { - self.online.load(Ordering::SeqCst) - } -} diff --git a/zingo-rpc/src/server/error.rs b/zingo-rpc/src/server/error.rs index 87cb096..851660d 100644 --- a/zingo-rpc/src/server/error.rs +++ b/zingo-rpc/src/server/error.rs @@ -50,14 +50,6 @@ pub enum IngestorError { QueuePushError(#[from] TrySendError), } -/// Zingo-Proxy distpater errors. -#[derive(Debug, thiserror::Error)] -pub enum DispatcherError { - /// Nym based errors. - #[error("Nym error: {0}")] - NymError(#[from] NymError), -} - /// Zingo-Proxy worker errors. #[derive(Debug, thiserror::Error)] pub enum WorkerError { @@ -87,9 +79,6 @@ pub enum ServerError { /// Ingestor based errors. #[error("Ingestor error: {0}")] IngestorError(#[from] IngestorError), - /// Dispatcher based errors. - #[error("Dispatcher error: {0}")] - DispatcherError(#[from] DispatcherError), /// Worker based errors. #[error("Worker error: {0}")] WorkerError(#[from] WorkerError), diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index 83f7874..78d8773 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -1,5 +1,7 @@ //! Holds the server ingestor (listener) implementations. +use nym_sdk::mixnet::MixnetMessageSender; +use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; use std::{ net::SocketAddr, sync::{ @@ -13,7 +15,7 @@ use crate::{ nym::{client::NymClient, error::NymError}, server::{ error::{IngestorError, QueueError}, - queue::QueueSender, + queue::{QueueReceiver, QueueSender}, request::ZingoProxyRequest, AtomicStatus, StatusType, }, @@ -133,6 +135,10 @@ pub struct NymIngestor { ingestor: NymClient, /// Used to send requests to the queue. queue: QueueSender, + /// Used to send requests to the queue. + response_queue: QueueReceiver<(Vec, AnonymousSenderTag)>, + /// Used to send requests to the queue. + response_requeue: QueueSender<(Vec, AnonymousSenderTag)>, /// Current status of the ingestor. status: AtomicStatus, /// Represents the Online status of the gRPC server. @@ -144,6 +150,8 @@ impl NymIngestor { pub async fn spawn( nym_conf_path: &str, queue: QueueSender, + response_queue: QueueReceiver<(Vec, AnonymousSenderTag)>, + response_requeue: QueueSender<(Vec, AnonymousSenderTag)>, status: AtomicStatus, online: Arc, ) -> Result { @@ -154,6 +162,8 @@ impl NymIngestor { Ok(NymIngestor { ingestor: listener, queue, + response_queue, + response_requeue, online, status, }) @@ -213,6 +223,45 @@ impl NymIngestor { } } } + outgoing = self.response_queue.listen() => { + match outgoing { + Ok(response) => { + println!("[TEST] Dispatcher received response: {:?}", response); + // NOTE: This may need to be removed / moved for scale use. + if self.check_for_shutdown().await { + self.status.store(5); + return Ok(()); + } + if let Err(nym_e) = self.ingestor + .client + .send_reply(response.1, response.0.clone()) + .await.map_err(NymError::from) { + match self.response_requeue.try_send(response) { + Ok(_) => { + eprintln!("Failed to send response over nym: {}\nResponse requeued, restarting nym dispatcher.", nym_e); + // TODO: Handle error. Restart nym dispatcher. + } + Err(QueueError::QueueFull(_request)) => { + eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response due to full response queue.\nFatal error! Restarting nym dispatcher.", nym_e); + // TODO: Handle queue full error here (start up second dispatcher?). 
Restart nym dispatcher + } + Err(_e) => { + eprintln!("Failed to send response over nym: {}\nAnd failed to requeue response due to the queue being closed.\nFatal error! Nym dispatcher shutting down..", nym_e); + // TODO: Handle queue closed error here. (return correct error type?) + self.status.store(6); + return Ok(()); //Return Err! + } + } + } + } + Err(_e) => { + eprintln!("Response queue closed, nym dispatcher shutting down."); + //TODO: Handle this error here (return correct error type?) + self.status.store(6); + return Ok(()); // Return Err! + } + } + } } } }) From 9640b576ba3e681034250a76fb8122e4e9ed4556 Mon Sep 17 00:00:00 2001 From: idky137 Date: Fri, 9 Aug 2024 18:51:13 +0100 Subject: [PATCH 15/18] removed commented code --- zingo-proxyd/src/nym_server.rs | 80 -------------------------------- zingo-proxyd/src/proxy.rs | 26 ----------- zingo-proxyd/src/server.rs | 12 ----- zingo-rpc/src/server.rs | 1 - zingo-rpc/src/server/ingestor.rs | 1 + 5 files changed, 1 insertion(+), 119 deletions(-) diff --git a/zingo-proxyd/src/nym_server.rs b/zingo-proxyd/src/nym_server.rs index 5ae7715..9f6f545 100644 --- a/zingo-proxyd/src/nym_server.rs +++ b/zingo-proxyd/src/nym_server.rs @@ -1,8 +1,4 @@ //! Nym-gRPC server implementation. -//! -//! TODO: - Add NymServerError error type and rewrite functions to return >, propagating internal errors. Include NymClientError from zingo-rpc::nym::utils. -//! - Update NymServer to handle all service RPCs (currently only accepts send_command). [Return "Not Yet Implemented" for unimplemented RPC's?] -//! - Update NymServer to handle multiple requests, from multiple clients, simultaniously. [Combine with zingoproxyd "queue" logic when implemented?] use std::sync::{ atomic::{AtomicBool, Ordering}, @@ -129,79 +125,3 @@ impl NymServer { } } } - -// impl NymServer { -// /// Receives and decodes encoded gRPC messages sent over the nym mixnet, processes them, encodes the response. -// /// The encoded response is sent back to the sender using a surb (single use reply block). -// pub async fn serve( -// mut self, -// online: Arc, -// ) -> tokio::task::JoinHandle> { -// let mut request_in: Vec = Vec::new(); -// tokio::task::spawn(async move { -// // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. -// let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); -// while online.load(Ordering::SeqCst) { -// // --- wait for request. 
-// while let Some(request_nym) = self.0 .0.wait_for_messages().await { -// if request_nym.is_empty() { -// interval.tick().await; -// if !online.load(Ordering::SeqCst) { -// println!("Nym server shutting down."); -// return Ok(()); -// } -// continue; -// } -// request_in = request_nym; -// break; -// } - -// // --- decode request -// let request_vu8 = request_in -// .first() -// .map(|r| r.message.clone()) -// .ok_or_else(|| "No response received from the nym network".to_string()) -// .unwrap(); - -// // --- print request for testing -// println!( -// "@zingoproxyd[nym]: request received: {:?} - request length: {}", -// &request_vu8[..], -// &request_vu8[..].len() -// ); - -// // --- deserialize request -// let request = RawTransaction::decode(&request_vu8[..]).unwrap(); - -// // --- process request -// let response = NymClient::nym_send_transaction(&request).await.unwrap(); - -// // --- decode response -// let mut response_vu8 = Vec::new(); -// response.encode(&mut response_vu8).unwrap(); - -// //print response for testing -// println!( -// "@zingoproxyd[nym]: response sent: {:?} - response length: {}", -// &response_vu8[..], -// &response_vu8[..].len() -// ); - -// // --- fetch recipient address -// let return_recipient = AnonymousSenderTag::try_from_base58_string( -// request_in[0].sender_tag.unwrap().to_base58_string(), -// ) -// .unwrap(); - -// // --- send response -// self.0 -// .0 -// .send_reply(return_recipient, response_vu8) -// .await -// .unwrap(); -// } -// println!("Nym server shutting down."); -// Ok(()) -// }) -// } -// } diff --git a/zingo-proxyd/src/proxy.rs b/zingo-proxyd/src/proxy.rs index 43c1eca..6f47e38 100644 --- a/zingo-proxyd/src/proxy.rs +++ b/zingo-proxyd/src/proxy.rs @@ -1,7 +1,4 @@ //! Zingo-Proxy server implementation. -//! -//! TODO: - Add ProxyServerError error type and rewrite functions to return >, propagating internal errors. -//! - Update spawn_server and nym_spawn to return > and > and use here. use crate::{nym_server::NymServer, server::spawn_grpc_server}; use zingo_rpc::{ @@ -13,24 +10,6 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tokio::task::JoinHandle; -// /// Holds configuration data for ZingoProxyD. -// pub struct ProxyConfig { -// proxy_port: u16, -// zebrad_port: u16, -// nym_conf_path: String, -// max_queue_size: usize, -// max_workers: usize, -// max_cache_mem: u16, -// } - -// pub struct Proxy { -// grpc_server: GrpcServer,* -// nym_server: NymServer,* -// state_engine,* -// queue_manager,* -// config: ProxyConfig, -// } - /// Launches test Zingo_Proxy server. pub async fn spawn_proxy( proxy_port: &u16, @@ -47,7 +26,6 @@ pub async fn spawn_proxy( startup_message(); println!("@zingoproxyd: Launching Zingo-Proxy!\n@zingoproxyd: Checking connection with node.."); - // TODO Add user and password fields. let _zebrad_uri = test_node_and_return_uri( zebrad_port, Some("xxxxxx".to_string()), @@ -73,15 +51,11 @@ pub async fn spawn_proxy( { println!("@zingoproxyd[nym]: Launching Nym Server.."); - // let nym_server: NymServer = NymServer(NymClient::nym_spawn(nym_conf_path).await); - // nym_addr_out = Some(nym_server.0 .0.nym_address().to_string()); - // let nym_proxy_handle = nym_server.serve(online).await; let nym_server = NymServer::spawn(nym_conf_path, online).await; nym_addr_out = Some(nym_server.nym_addr.clone()); let nym_proxy_handle = nym_server.serve().await; handles.push(nym_proxy_handle); - // TODO: Add wait_on_nym_startup(nym_addr_out, online.clone()) function to test nym server. 
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; } diff --git a/zingo-proxyd/src/server.rs b/zingo-proxyd/src/server.rs index 46b34f7..f12ebde 100644 --- a/zingo-proxyd/src/server.rs +++ b/zingo-proxyd/src/server.rs @@ -1,16 +1,4 @@ //! gRPC server implementation. -//! -//! TODO: - Add GrpcServerError error type and rewrite functions to return >, propagating internal errors. -//! - Add user and password as fields of ProxyClient and use here. - -// use http::Uri; -// use std::{ -// net::{Ipv4Addr, SocketAddr}, -// sync::{ -// atomic::{AtomicBool, Ordering}, -// Arc, -// }, -// }; use http::Uri; use std::net::{Ipv4Addr, SocketAddr}; diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 4d5632f..1521260 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -17,7 +17,6 @@ pub mod request; pub mod worker; use self::{ - // dispatcher::NymDispatcher, error::{IngestorError, ServerError, WorkerError}, ingestor::{NymIngestor, TcpIngestor}, queue::Queue, diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index 78d8773..9022439 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -236,6 +236,7 @@ impl NymIngestor { .client .send_reply(response.1, response.0.clone()) .await.map_err(NymError::from) { + eprintln!("Failed to send response over Nym Mixnet: {}", nym_e); match self.response_requeue.try_send(response) { Ok(_) => { eprintln!("Failed to send response over nym: {}\nResponse requeued, restarting nym dispatcher.", nym_e); From 04ab8896eff65aa3f8f3f2a309dd0047f6740b49 Mon Sep 17 00:00:00 2001 From: idky137 Date: Tue, 13 Aug 2024 20:57:06 +0100 Subject: [PATCH 16/18] integration tests running on new server --- zingo-proxyd/src/config.rs | 4 +- zingo-proxyd/src/error.rs | 3 + zingo-proxyd/src/indexer.rs | 106 ++++++++++++++++++++++--------- zingo-rpc/src/server.rs | 3 +- zingo-rpc/src/server/ingestor.rs | 4 +- zingo-rpc/src/server/worker.rs | 36 ++++------- zingoproxy-testutils/src/lib.rs | 49 ++++++++------ 7 files changed, 127 insertions(+), 78 deletions(-) diff --git a/zingo-proxyd/src/config.rs b/zingo-proxyd/src/config.rs index 99c9931..06ac52e 100644 --- a/zingo-proxyd/src/config.rs +++ b/zingo-proxyd/src/config.rs @@ -78,7 +78,7 @@ impl Default for IndexerConfig { zebrad_port: 18232, node_user: Some("xxxxxx".to_string()), node_password: Some("xxxxxx".to_string()), - max_queue_size: 256, + max_queue_size: 1024, max_worker_pool_size: 32, idle_worker_pool_size: 4, } @@ -97,7 +97,7 @@ impl Default for IndexerConfig { zebrad_port: 18232, node_user: Some("xxxxxx".to_string()), node_password: Some("xxxxxx".to_string()), - max_queue_size: 256, + max_queue_size: 1024, max_worker_pool_size: 32, idle_worker_pool_size: 4, } diff --git a/zingo-proxyd/src/error.rs b/zingo-proxyd/src/error.rs index c8fcbac..0d5af52 100644 --- a/zingo-proxyd/src/error.rs +++ b/zingo-proxyd/src/error.rs @@ -17,6 +17,9 @@ pub enum IndexerError { /// HTTP related errors due to invalid URI. #[error("HTTP error: Invalid URI {0}")] HttpError(#[from] http::Error), + /// Returned from tokio joinhandles.. + #[error("Join handle error: Invalid URI {0}")] + TokioJoinError(#[from] tokio::task::JoinError), /// Custom indexor errors. #[error("Misc indexer error: {0}")] MiscIndexerError(String), diff --git a/zingo-proxyd/src/indexer.rs b/zingo-proxyd/src/indexer.rs index b7785d2..2f0955e 100644 --- a/zingo-proxyd/src/indexer.rs +++ b/zingo-proxyd/src/indexer.rs @@ -45,7 +45,7 @@ impl IndexerStatus { /// Zingo-Indexer. 
pub struct Indexer { /// Indexer configuration data. - config: IndexerConfig, + _config: IndexerConfig, /// GRPC server. server: Option, // /// Internal block cache. @@ -57,43 +57,89 @@ pub struct Indexer { } impl Indexer { - /// Start an Indexer service. + // /// Launches an Indexer service. + // /// + // /// Currently only takes an IndexerConfig. + // pub async fn start(config: IndexerConfig) -> Result<(), IndexerError> { + // // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + // let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + // let online = Arc::new(AtomicBool::new(true)); + // set_ctrlc(online.clone()); + // if config.nym_active { + // nym_bin_common::logging::setup_logging(); + // } + // startup_message(); + // println!("Launching Zingdexer!"); + // let mut indexer: Indexer = Indexer::new(config, online.clone()).await?; + // let server_handle = if let Some(server) = indexer.server.take() { + // Some(server.serve().await) + // } else { + // return Err(IndexerError::MiscIndexerError( + // "Server Missing! Fatal Error!.".to_string(), + // )); + // }; + // indexer.status.indexer_status.store(2); + // loop { + // indexer.status.load(); + // // indexer.log_status(); + // if indexer.check_for_shutdown() { + // indexer.status.indexer_status.store(4); + // indexer.shutdown_components(server_handle).await; + // indexer.status.indexer_status.store(5); + // return Ok(()); + // } + // interval.tick().await; + // } + // } + + /// Starts Indexer service. /// /// Currently only takes an IndexerConfig. pub async fn start(config: IndexerConfig) -> Result<(), IndexerError> { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - let online = Arc::new(AtomicBool::new(true)); set_ctrlc(online.clone()); - nym_bin_common::logging::setup_logging(); - startup_message(); + self::Indexer::start_indexer_service(config, online) + .await? + .await? + } + /// Launches an Indexer service. + /// + /// Spawns an indexer service in a new task. + pub async fn start_indexer_service( + config: IndexerConfig, + online: Arc, + ) -> Result>, IndexerError> { + // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. + let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); + if config.nym_active { + nym_bin_common::logging::setup_logging(); + } println!("Launching Zingdexer!"); let mut indexer: Indexer = Indexer::new(config, online.clone()).await?; - let status = indexer.status.clone(); - - let server_handle = if let Some(server) = indexer.server.take() { - Some(server.serve().await) - } else { - return Err(IndexerError::MiscIndexerError( - "Server Missing! Fatal Error!.".to_string(), - )); - }; - - indexer.status.indexer_status.store(2); - loop { - indexer.status.load(); - // indexer.log_status(); - if indexer.check_for_shutdown() { - status.indexer_status.store(4); - indexer.shutdown_components(server_handle).await; - status.indexer_status.store(5); - return Ok(()); + Ok(tokio::task::spawn(async move { + let server_handle = if let Some(server) = indexer.server.take() { + Some(server.serve().await) + } else { + return Err(IndexerError::MiscIndexerError( + "Server Missing! 
Fatal Error!.".to_string(), + )); + }; + + indexer.status.indexer_status.store(2); + loop { + indexer.status.load(); + // indexer.log_status(); + if indexer.check_for_shutdown() { + indexer.status.indexer_status.store(4); + indexer.shutdown_components(server_handle).await; + indexer.status.indexer_status.store(5); + return Ok(()); + } + interval.tick().await; } - interval.tick().await; - } + })) } /// Creates a new Indexer. @@ -136,7 +182,7 @@ impl Indexer { ); println!("Server Ready."); Ok(Indexer { - config, + _config: config, server, status, online, @@ -144,7 +190,7 @@ impl Indexer { } /// Checks indexers online status and servers internal status for closure signal. - pub fn check_for_shutdown(&self) -> bool { + fn check_for_shutdown(&self) -> bool { if self.status() >= 4 { return true; } diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index 1521260..a03dc0c 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -289,7 +289,7 @@ impl Server { worker_handles = self.worker_pool.clone().serve().await; self.status.server_status.store(1); loop { - if self.request_queue.queue_length() >= (self.request_queue.max_length() / 2) + if self.request_queue.queue_length() >= (self.request_queue.max_length() / 4) && (self.worker_pool.workers() < self.worker_pool.max_size() as usize) { match self.worker_pool.push_worker().await { @@ -355,7 +355,6 @@ impl Server { &mut self, tcp_ingestor_handle: Option>>, nym_ingestor_handle: Option>>, - // nym_dispatcher_handle: Option>>, mut worker_handles: Vec>>>, ) { if let Some(handle) = tcp_ingestor_handle { diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs index 9022439..ac3e484 100644 --- a/zingo-rpc/src/server/ingestor.rs +++ b/zingo-rpc/src/server/ingestor.rs @@ -76,7 +76,9 @@ impl TcpIngestor { match incoming { Ok((stream, _)) => { match self.queue.try_send(ZingoProxyRequest::new_from_grpc(stream)) { - Ok(_) => {} + Ok(_) => { + println!("[TEST] Requests in Queue: {}", self.queue.queue_length()); + } Err(QueueError::QueueFull(_request)) => { eprintln!("Queue Full."); // TODO: Return queue full tonic status over tcpstream and close (that TcpStream..). diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs index b71312b..bc919e3 100644 --- a/zingo-rpc/src/server/worker.rs +++ b/zingo-rpc/src/server/worker.rs @@ -97,27 +97,7 @@ impl Worker { incoming = self.queue.listen() => { match incoming { Ok(request) => { - // NOTE: This may need to be removed / moved for scale use (possible it should be moved to after the request is serviced?). - if self.check_for_shutdown().await { - match self.requeue.try_send(request) { - Ok(_) => { - return Ok(()); - } - Err(QueueError::QueueFull(_request)) => { - self.atomic_status.store(5); - eprintln!("Request Queue Full. Failed to send response to queue.\nWorker shutting down."); - // TODO: Handle this error! (cancel shutdown?). - return Ok(()); - } - Err(e) => { - self.atomic_status.store(5); - eprintln!("Request Queue Closed. Failed to send response to queue: {}\nWorker shutting down.", e); - // TODO: Handle queue closed error here. (return correct error?) - return Ok(()); - } - } - } else { - self.atomic_status.store(2); + self.atomic_status.store(2); match request { ZingoProxyRequest::TcpServerRequest(request) => { Server::builder().add_service(svc.clone()) @@ -157,13 +137,18 @@ impl Worker { } } } + // NOTE: This may need to be removed for scale use. 
+ if self.check_for_shutdown().await { + self.atomic_status.store(5); + return Ok(()); + } else { self.atomic_status.store(1); } } Err(_e) => { self.atomic_status.store(5); eprintln!("Queue closed, worker shutting down."); - // TODO: Handle queue closed error here. (return correct error?) + // TODO: Handle queue closed error here. (return correct error / undate status to correct err code.) return Ok(()); } } @@ -304,21 +289,22 @@ impl WorkerPool { if self.workers.len() >= self.max_size as usize { Err(WorkerError::WorkerPoolFull) } else { + let worker_index = self.workers(); self.workers.push( Worker::spawn( - self.workers.len(), + worker_index, self.workers[0].queue.clone(), self.workers[0].requeue.clone(), self.workers[0].nym_response_queue.clone(), self.workers[0].grpc_client.lightwalletd_uri.clone(), self.workers[0].grpc_client.zebrad_uri.clone(), - self.status.statuses[self.workers.len()].clone(), + self.status.statuses[worker_index].clone(), self.online.clone(), ) .await, ); self.status.workers.fetch_add(1, Ordering::SeqCst); - Ok(self.workers[self.workers.len()].clone().serve().await) + Ok(self.workers[worker_index].clone().serve().await) } } diff --git a/zingoproxy-testutils/src/lib.rs b/zingoproxy-testutils/src/lib.rs index 0190a14..7db31c4 100644 --- a/zingoproxy-testutils/src/lib.rs +++ b/zingoproxy-testutils/src/lib.rs @@ -16,8 +16,8 @@ pub struct TestManager { pub regtest_manager: zingo_testutils::regtest::RegtestManager, /// Zingolib regtest network. pub regtest_network: zingoconfig::RegtestNetwork, - /// Zingo-Proxy gRPC listen port. - pub proxy_port: u16, + /// Zingo-Indexer gRPC listen port. + pub indexer_port: u16, /// Zingo-Proxy Nym listen address. pub nym_addr: Option, /// Zebrad/Zcashd JsonRpc listen port. @@ -27,21 +27,21 @@ pub struct TestManager { } impl TestManager { - /// Launches a zingo regtest manager and zingo-proxy, created TempDir for configuration and log files. + /// Launches a zingo regtest manager and zingo-indexer, created TempDir for configuration and log files. pub async fn launch( online: std::sync::Arc, ) -> ( Self, zingo_testutils::regtest::ChildProcessHandler, - Vec>>, + tokio::task::JoinHandle>, ) { let lwd_port = portpicker::pick_unused_port().expect("No ports free"); let zebrad_port = portpicker::pick_unused_port().expect("No ports free"); - let proxy_port = portpicker::pick_unused_port().expect("No ports free"); + let indexer_port = portpicker::pick_unused_port().expect("No ports free"); let temp_conf_dir = create_temp_conf_files(lwd_port, zebrad_port).unwrap(); let temp_conf_path = temp_conf_dir.path().to_path_buf(); - let nym_conf_path = temp_conf_path.join("nym"); + let _nym_conf_path = temp_conf_path.join("nym"); set_custom_drops(online.clone(), Some(temp_conf_path.clone())); @@ -52,27 +52,40 @@ impl TestManager { .launch(true) .expect("Failed to start regtest services"); - let (proxy_handler, nym_addr) = zingoproxylib::proxy::spawn_proxy( - &proxy_port, - &lwd_port, - &zebrad_port, - nym_conf_path.to_str().unwrap(), - online.clone(), - ) - .await; + // TODO: This turns nym functionality off. for nym tests we will need to add option to include nym in test manager. + // - queue and workerpool sizes may need to be changed here. 
+ let indexer_config = zingoproxylib::config::IndexerConfig { + tcp_active: true, + listen_port: Some(indexer_port), + nym_active: false, + nym_conf_path: None, + lightwalletd_port: lwd_port, + zebrad_port, + node_user: Some("xxxxxx".to_string()), + node_password: Some("xxxxxx".to_string()), + max_queue_size: 512, + max_worker_pool_size: 96, + idle_worker_pool_size: 48, + }; + let indexer_handler = + zingoproxylib::indexer::Indexer::start_indexer_service(indexer_config, online.clone()) + .await + .unwrap(); + // NOTE: This is required to give the server time to launch, this is not used in production code but could be rewritten to improve testing efficiency. + tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; ( TestManager { temp_conf_dir, regtest_manager, regtest_network, - proxy_port, - nym_addr, + indexer_port, + nym_addr: None, zebrad_port, online, }, regtest_handler, - proxy_handler, + indexer_handler, ) } @@ -80,7 +93,7 @@ impl TestManager { pub fn get_proxy_uri(&self) -> http::Uri { http::Uri::builder() .scheme("http") - .authority(format!("127.0.0.1:{0}", self.proxy_port)) + .authority(format!("127.0.0.1:{0}", self.indexer_port)) .path_and_query("") .build() .unwrap() From bbc292a294c1d9b263f0a514691e73fdadef5760 Mon Sep 17 00:00:00 2001 From: idky137 Date: Tue, 13 Aug 2024 21:10:34 +0100 Subject: [PATCH 17/18] removed old server --- zingo-proxyd/src/indexer.rs | 35 -------- zingo-proxyd/src/lib.rs | 3 - zingo-proxyd/src/nym_server.rs | 127 -------------------------- zingo-proxyd/src/proxy.rs | 146 ------------------------------ zingo-proxyd/src/server.rs | 149 ------------------------------- zingo-rpc/src/server.rs | 12 --- zingo-rpc/src/server/ingestor.rs | 3 +- zingo-rpc/src/server/worker.rs | 4 +- 8 files changed, 2 insertions(+), 477 deletions(-) delete mode 100644 zingo-proxyd/src/nym_server.rs delete mode 100644 zingo-proxyd/src/proxy.rs delete mode 100644 zingo-proxyd/src/server.rs diff --git a/zingo-proxyd/src/indexer.rs b/zingo-proxyd/src/indexer.rs index 2f0955e..183b601 100644 --- a/zingo-proxyd/src/indexer.rs +++ b/zingo-proxyd/src/indexer.rs @@ -57,41 +57,6 @@ pub struct Indexer { } impl Indexer { - // /// Launches an Indexer service. - // /// - // /// Currently only takes an IndexerConfig. - // pub async fn start(config: IndexerConfig) -> Result<(), IndexerError> { - // // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. - // let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - // let online = Arc::new(AtomicBool::new(true)); - // set_ctrlc(online.clone()); - // if config.nym_active { - // nym_bin_common::logging::setup_logging(); - // } - // startup_message(); - // println!("Launching Zingdexer!"); - // let mut indexer: Indexer = Indexer::new(config, online.clone()).await?; - // let server_handle = if let Some(server) = indexer.server.take() { - // Some(server.serve().await) - // } else { - // return Err(IndexerError::MiscIndexerError( - // "Server Missing! Fatal Error!.".to_string(), - // )); - // }; - // indexer.status.indexer_status.store(2); - // loop { - // indexer.status.load(); - // // indexer.log_status(); - // if indexer.check_for_shutdown() { - // indexer.status.indexer_status.store(4); - // indexer.shutdown_components(server_handle).await; - // indexer.status.indexer_status.store(5); - // return Ok(()); - // } - // interval.tick().await; - // } - // } - /// Starts Indexer service. /// /// Currently only takes an IndexerConfig. 
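With the old `spawn_proxy`/`spawn_grpc_server` entry points removed below, the indexer is driven through `Indexer::start` (or `start_indexer_service` for embedding, as the test manager above does). A minimal sketch of a binary using the new entry point — assuming the `zingoproxylib` crate layout shown in this series (`config::IndexerConfig`, `indexer::Indexer`) and the workspace's tokio runtime; ports, credentials and pool sizes are illustrative placeholders, not values taken from the patch:

```rust
// Sketch only: not part of this patch series.
use zingoproxylib::{config::IndexerConfig, indexer::Indexer};

#[tokio::main]
async fn main() {
    // Field names mirror the IndexerConfig literal used by the test manager above;
    // the values here are placeholders.
    let config = IndexerConfig {
        tcp_active: true,
        listen_port: Some(8137),
        nym_active: false,
        nym_conf_path: None,
        lightwalletd_port: 9067,
        zebrad_port: 18232,
        node_user: Some("xxxxxx".to_string()),
        node_password: Some("xxxxxx".to_string()),
        max_queue_size: 1024,
        max_worker_pool_size: 32,
        idle_worker_pool_size: 4,
    };
    // `Indexer::start` installs the ctrl-c handler, spawns the indexer service task
    // and waits on it until shutdown is signalled.
    if let Err(e) = Indexer::start(config).await {
        eprintln!("Indexer exited with error: {}", e);
    }
}
```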
diff --git a/zingo-proxyd/src/lib.rs b/zingo-proxyd/src/lib.rs index 2b6ef41..d259702 100644 --- a/zingo-proxyd/src/lib.rs +++ b/zingo-proxyd/src/lib.rs @@ -7,6 +7,3 @@ pub mod config; pub mod error; pub mod indexer; -pub mod nym_server; -pub mod proxy; -pub mod server; diff --git a/zingo-proxyd/src/nym_server.rs b/zingo-proxyd/src/nym_server.rs deleted file mode 100644 index 9f6f545..0000000 --- a/zingo-proxyd/src/nym_server.rs +++ /dev/null @@ -1,127 +0,0 @@ -//! Nym-gRPC server implementation. - -use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, -}; - -use nym_sdk::mixnet::{MixnetMessageSender, ReconstructedMessage}; -use nym_sphinx_anonymous_replies::requests::AnonymousSenderTag; - -use zingo_rpc::{nym::client::NymClient, rpc::GrpcClient, server::request::ZingoProxyRequest}; - -/// Wrapper struct for a Nym client. -pub struct NymServer { - /// NymClient data - pub nym_client: NymClient, - /// Nym Address - pub nym_addr: String, - /// Represents the Online status of the gRPC server. - pub online: Arc, -} - -impl NymServer { - /// Receives and decodes encoded gRPC messages sent over the nym mixnet, processes them, encodes the response. - /// The encoded response is sent back to the sender using a surb (single use reply block). - pub async fn serve(mut self) -> tokio::task::JoinHandle> { - let mut request_in: Vec = Vec::new(); - tokio::task::spawn(async move { - // NOTE: This interval may need to be reduced or removed / moved once scale testing begins. - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50)); - // NOTE: the following should be removed with the addition of the queue and worker pool. - let lwd_port = 8080; - let zebrad_port = 18232; - let proxy_client = GrpcClient { - lightwalletd_uri: http::Uri::builder() - .scheme("http") - .authority(format!("localhost:{lwd_port}")) - .path_and_query("/") - .build() - .unwrap(), - zebrad_uri: http::Uri::builder() - .scheme("http") - .authority(format!("localhost:{zebrad_port}")) - .path_and_query("/") - .build() - .unwrap(), - online: self.online.clone(), - }; - while self.online.load(Ordering::SeqCst) { - // --- wait for request. - while let Some(request_nym) = self.nym_client.client.wait_for_messages().await { - if request_nym.is_empty() { - interval.tick().await; - if !self.online.load(Ordering::SeqCst) { - println!("Nym server shutting down."); - return Ok(()); - } - continue; - } - request_in = request_nym; - break; - } - - // --- decode request - let request_vu8 = request_in - .first() - .map(|r| r.message.clone()) - .ok_or_else(|| "No response received from the nym network".to_string()) - .unwrap(); - // --- fetch recipient address - let return_recipient = AnonymousSenderTag::try_from_base58_string( - request_in[0].sender_tag.unwrap().to_base58_string(), - ) - .unwrap(); - // --- build ZingoProxyRequest - let zingo_proxy_request = - ZingoProxyRequest::new_from_nym(return_recipient, request_vu8.as_ref()) - .unwrap(); - - // print request for testing - // println!( - // "@zingoproxyd[nym][TEST]: ZingoProxyRequest recieved: {:?}.", - // zingo_proxy_request - // ); - - // --- process request - // NOTE: when the queue is added requests will not be processed here but by the queue! 
- let response: Vec; - match zingo_proxy_request { - ZingoProxyRequest::NymServerRequest(request) => { - response = proxy_client.process_nym_request(&request).await.unwrap(); - } - _ => { - todo!() - } - } - - // print response for testing - // println!( - // "@zingoproxyd[nym][TEST]: Response sent: {:?}.", - // &response[..], - // ); - - // --- send response - self.nym_client - .client - .send_reply(return_recipient, response) - .await - .unwrap(); - } - // Why print this? - println!("Nym server shutting down."); - Ok(()) - }) - } - - /// Returns a new NymServer Inatanse - pub async fn spawn(nym_conf_path: &str, online: Arc) -> Self { - let nym_client = NymClient::spawn(nym_conf_path).await.unwrap(); - let nym_addr = nym_client.client.nym_address().to_string(); - NymServer { - nym_client, - nym_addr, - online, - } - } -} diff --git a/zingo-proxyd/src/proxy.rs b/zingo-proxyd/src/proxy.rs deleted file mode 100644 index 6f47e38..0000000 --- a/zingo-proxyd/src/proxy.rs +++ /dev/null @@ -1,146 +0,0 @@ -//! Zingo-Proxy server implementation. - -use crate::{nym_server::NymServer, server::spawn_grpc_server}; -use zingo_rpc::{ - jsonrpc::connector::test_node_and_return_uri, - proto::service::{compact_tx_streamer_client::CompactTxStreamerClient, Empty}, -}; - -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; -use tokio::task::JoinHandle; - -/// Launches test Zingo_Proxy server. -pub async fn spawn_proxy( - proxy_port: &u16, - lwd_port: &u16, - zebrad_port: &u16, - nym_conf_path: &str, - online: Arc, -) -> ( - Vec>>, - Option, -) { - let mut handles = vec![]; - let nym_addr_out: Option; - - startup_message(); - println!("@zingoproxyd: Launching Zingo-Proxy!\n@zingoproxyd: Checking connection with node.."); - let _zebrad_uri = test_node_and_return_uri( - zebrad_port, - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await - .unwrap(); - - println!("@zingoproxyd: Launching gRPC Server.."); - let proxy_handle = spawn_grpc_server(proxy_port, lwd_port, zebrad_port, online.clone()).await; - handles.push(proxy_handle); - - #[cfg(not(feature = "nym_poc"))] - { - wait_on_grpc_startup(proxy_port, online.clone()).await; - } - #[cfg(feature = "nym_poc")] - { - wait_on_grpc_startup(lwd_port, online.clone()).await; - } - - #[cfg(not(feature = "nym_poc"))] - { - println!("@zingoproxyd[nym]: Launching Nym Server.."); - - let nym_server = NymServer::spawn(nym_conf_path, online).await; - nym_addr_out = Some(nym_server.nym_addr.clone()); - let nym_proxy_handle = nym_server.serve().await; - - handles.push(nym_proxy_handle); - tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; - } - - #[cfg(feature = "nym_poc")] - { - nym_addr_out = None; - } - (handles, nym_addr_out) -} - -/// Closes test Zingo-Proxy servers currently active. -pub async fn close_proxy(online: Arc) { - online.store(false, Ordering::SeqCst); -} - -/// Tries to connect to the gRPC server and retruns if connection established. Shuts down with error message if connection with server cannot be established after 3 attempts. 
-async fn wait_on_grpc_startup(proxy_port: &u16, online: Arc) { - let proxy_uri = http::Uri::builder() - .scheme("http") - .authority(format!("localhost:{proxy_port}")) - .path_and_query("/") - .build() - .unwrap(); - let mut attempts = 0; - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); - interval.tick().await; - while attempts < 3 { - match CompactTxStreamerClient::connect(proxy_uri.clone()).await { - Ok(mut client) => match client.get_lightd_info(tonic::Request::new(Empty {})).await { - Ok(_) => { - return; - } - Err(e) => { - println!( - "@zingoproxyd: GRPC server connection attempt {} failed with error: {}. Re", - attempts + 1, - e - ); - } - }, - Err(e) => { - println!( - "@zingoproxyd: GRPC server attempt {} failed to connect with error: {}", - attempts + 1, - e - ); - } - } - attempts += 1; - interval.tick().await; - } - println!("@zingoproxyd: Failed to start gRPC server, please check system config. Exiting Zingo-Proxy..."); - online.store(false, Ordering::SeqCst); - std::process::exit(1); -} - -fn startup_message() { - let welcome_message = r#" - ░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░▒▒░░░░░ - ░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓░▒▒▒░░ - ░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒████▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒██▓▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒██▓▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███▓██▓▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒███▓░▒▓▓████████████████▓▓▒▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▓▓▓▓▒▓████▓▓███████████████████▓▒▓▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▓▓▓▓▓▒▒▓▓▓▓████████████████████▓▒▓▓▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▓▓▓▓▓█████████████████████████▓▒▓▓▓▓▓▒▒▒▒▒ - ▒▒▒▒▒▒▒▓▓▓▒▓█████████████████████████▓▓▓▓▓▓▓▓▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▓▓▓████████████████████████▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▓▒███████████████████████▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▓███████████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▓███████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒▒▒▓██████████▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒███▓▒▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▓████▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ - ▒▒▒▒▒▒▒░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ - Thank you for using ZingoLabs Zingdexer! - - - Donate to us at https://free2z.cash/zingolabs. - - Submit any security conserns to us at zingodisclosure@proton.me. - -****** Please note Zingdexer is currently in development and should not be used to run mainnet nodes. ****** - "#; - println!("{}", welcome_message); -} diff --git a/zingo-proxyd/src/server.rs b/zingo-proxyd/src/server.rs deleted file mode 100644 index f12ebde..0000000 --- a/zingo-proxyd/src/server.rs +++ /dev/null @@ -1,149 +0,0 @@ -//! gRPC server implementation. - -use http::Uri; -use std::net::{Ipv4Addr, SocketAddr}; -use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, -}; -use std::task::{Context, Poll}; -use tonic::codegen::{BoxFuture, StdError}; -use tonic::transport::NamedService; -use tower::Service; - -use zingo_rpc::{jsonrpc::connector::test_node_and_return_uri, rpc::GrpcClient}; - -#[cfg(not(feature = "nym_poc"))] -use zingo_rpc::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; - -#[cfg(feature = "nym_poc")] -use zcash_client_backend::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; - -/// Configuration data for gRPC server. -pub struct GrpcServer(pub GrpcClient); - -impl GrpcServer { - /// Starts gRPC service. 
- pub fn serve( - self, - port: impl Into + Send + Sync + 'static, - online: Arc, - ) -> tokio::task::JoinHandle> { - tokio::task::spawn(async move { - let svc = CompactTxStreamerServer::new(self.0); - let logging_svc = LoggingService::new(svc); - - let sockaddr = SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), port.into()); - println!("@zingoproxyd: gRPC server listening on: {sockaddr}"); - - let server = tonic::transport::Server::builder() - .add_service(logging_svc.clone()) - .serve(sockaddr); - - let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500)); - tokio::select! { - result = server => { - match result { - Ok(_) => { - // TODO: Gracefully restart gRPC server. - println!("@zingoproxyd: gRPC Server closed early. Restart required"); - Ok(()) - } - Err(e) => { - // TODO: restart server or set online to false and exit - println!("@zingoproxyd: gRPC Server closed with error: {}. Restart required", e); - Err(e) - } - } - } - _ = async { - while online.load(Ordering::SeqCst) { - interval.tick().await; - } - } => { - println!("@zingoproxyd: gRPC server shutting down."); - Ok(()) - } - } - }) - } - - /// Creates configuration data for gRPC server. - pub fn new(lightwalletd_uri: http::Uri, zebrad_uri: http::Uri) -> Self { - GrpcServer(GrpcClient { - lightwalletd_uri, - zebrad_uri, - online: Arc::new(AtomicBool::new(true)), - }) - } -} - -/// Spawns a gRPC server. -pub async fn spawn_grpc_server( - proxy_port: &u16, - lwd_port: &u16, - zebrad_port: &u16, - online: Arc, -) -> tokio::task::JoinHandle> { - let lwd_uri = Uri::builder() - .scheme("http") - .authority(format!("localhost:{lwd_port}")) - .path_and_query("/") - .build() - .unwrap(); - - // TODO Add user and password as fields of ProxyClient and use here. - let zebra_uri = test_node_and_return_uri( - zebrad_port, - Some("xxxxxx".to_string()), - Some("xxxxxx".to_string()), - ) - .await - .unwrap(); - - let server = GrpcServer::new(lwd_uri, zebra_uri); - server.serve(*proxy_port, online) -} - -#[derive(Clone)] -struct LoggingService { - inner: T, -} - -impl LoggingService { - pub fn new(inner: T) -> Self { - Self { inner } - } -} - -impl Service> for LoggingService -where - T: Service, Response = http::Response> + Send + 'static, - B: Send + 'static + std::fmt::Debug, - T::Error: Into + Send + 'static, - T::Future: Send + 'static, -{ - type Response = T::Response; - type Error = T::Error; - type Future = BoxFuture; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_ready(cx) - } - - fn call(&mut self, req: http::Request) -> Self::Future { - println!("Received request: {:?}", req); - let fut = self.inner.call(req); - Box::pin(async move { - let res = fut.await?; - Ok(res) - }) - } -} - -impl NamedService for LoggingService -where - T: NamedService, -{ - const NAME: &'static str = T::NAME; -} diff --git a/zingo-rpc/src/server.rs b/zingo-rpc/src/server.rs index a03dc0c..54bf96e 100644 --- a/zingo-rpc/src/server.rs +++ b/zingo-rpc/src/server.rs @@ -52,18 +52,6 @@ impl AtomicStatus { pub fn store(&self, status: usize) { self.0.store(status, Ordering::SeqCst); } - - // fn to_display(status: &AtomicStatus) -> ColoredString { - // match StatusType::from(status.load()) { - // StatusType::Spawning => "●".yellow(), - // StatusType::Listening => "●".green(), - // StatusType::Working => "●".blue(), - // StatusType::Inactive => "●".red(), - // StatusType::Closing => "●".magenta(), - // StatusType::Offline => "●".white(), - // StatusType::Error => "●".red().bold(), - // } - // } } /// 
Status of the server.
diff --git a/zingo-rpc/src/server/ingestor.rs b/zingo-rpc/src/server/ingestor.rs
index ac3e484..c5942a8 100644
--- a/zingo-rpc/src/server/ingestor.rs
+++ b/zingo-rpc/src/server/ingestor.rs
@@ -194,8 +194,7 @@ impl NymIngestor {
                     }
                     match incoming {
                         Some(request) => {
-                            // NOTE / TODO: POC server checked for empty emssages here (if request.is_empty()). Could be required here...
-                            // TODO: Handle EmptyMessageError here.
+                            // NOTE / TODO: POC server checked for empty messages here (if request.is_empty()). Could be required here...
                             let request_vu8 = request
                                 .first()
                                 .map(|r| r.message.clone())
diff --git a/zingo-rpc/src/server/worker.rs b/zingo-rpc/src/server/worker.rs
index bc919e3..67a14cd 100644
--- a/zingo-rpc/src/server/worker.rs
+++ b/zingo-rpc/src/server/worker.rs
@@ -41,8 +41,6 @@ pub struct Worker {
     nym_response_queue: QueueSender<(Vec<u8>, AnonymousSenderTag)>,
     /// gRPC client used for processing requests received over http.
     grpc_client: GrpcClient,
-    // /// Workers current status, includes timestamp for despawning inactive workers..
-    // worker_status: WorkerStatus,
     /// Thread safe worker status.
     atomic_status: AtomicStatus,
     /// Represents the Online status of the Worker.
@@ -83,7 +81,7 @@ impl Worker {
     pub async fn serve(self) -> tokio::task::JoinHandle<Result<(), WorkerError>> {
         tokio::task::spawn(async move {
             // NOTE: This interval may need to be reduced or removed / moved once scale testing begins.
-            let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(50));
+            let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(100));
             let svc = CompactTxStreamerServer::new(self.grpc_client.clone());
             // TODO: create tonic server here for use within loop.
             self.atomic_status.store(1);

From 6b72f017a237d3b5bd9d5f4bbcf21428567b12ce Mon Sep 17 00:00:00 2001
From: idky137
Date: Fri, 16 Aug 2024 15:31:02 +0100
Subject: [PATCH 18/18] updated readme

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index e686dba..570c701 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ zingodisclosure@proton.me
 Will eventually hold the rust implementations of the LightWallet Service and Darkside RPCs, along with the wallet-side and server-side Nym-powered implementations.
 
 # Zingo-IndexerD
-A gRPC server
+A gRPC server capable of servicing clients both over HTTP and over the Nym Mixnet. Currently only send_transaction and get_lightd_info have been implemented over Nym. Under the "nym_poc" feature flag Zingo-IndexerD can also act as a Nym-powered proxy, running between Zcash wallets and Zingo-IndexerD, capable of sending Zcash transactions over the Nym Mixnet.
 
 Note: The wallet-side nym service RPC implementations are moving to CompactTxStreamerClient for easier consumption by wallets. Functionality under the "nym_poc" feature flag will be removed once a working example has been implemented directly in zingolib.
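For the HTTP path described above, a client can exercise a running Zingo-IndexerD with the same `CompactTxStreamerClient` calls the removed `wait_on_grpc_startup` helper used. A minimal sketch — assuming the `zingo_rpc` re-exports used elsewhere in this series and the workspace's tonic/http/tokio dependencies; the port is a placeholder and should match the indexer's configured listen port:

```rust
// Sketch only: connects to a local indexer over HTTP and requests lightd info.
use zingo_rpc::proto::service::{compact_tx_streamer_client::CompactTxStreamerClient, Empty};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder port; use the indexer's configured listen_port.
    let uri = http::Uri::builder()
        .scheme("http")
        .authority("localhost:8137")
        .path_and_query("/")
        .build()?;
    let mut client = CompactTxStreamerClient::connect(uri).await?;
    let info = client.get_lightd_info(tonic::Request::new(Empty {})).await?;
    println!("lightd info: {:?}", info.into_inner());
    Ok(())
}
```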