diff --git a/implementations/rust/ockam/ockam_api/src/cli_state/storage/tcp_portals_repository.rs b/implementations/rust/ockam/ockam_api/src/cli_state/storage/tcp_portals_repository.rs index 9e8b9efc3c9..9be3d269d9f 100644 --- a/implementations/rust/ockam/ockam_api/src/cli_state/storage/tcp_portals_repository.rs +++ b/implementations/rust/ockam/ockam_api/src/cli_state/storage/tcp_portals_repository.rs @@ -72,7 +72,7 @@ impl TcpPortalsRepository for AutoRetry { #[derive(Debug, Clone, PartialEq, Eq)] pub struct TcpInlet { bind_addr: SocketAddr, - outlet_addr: MultiAddr, + outlet_addresses: Vec, alias: String, privileged: bool, } @@ -80,13 +80,13 @@ pub struct TcpInlet { impl TcpInlet { pub fn new( bind_addr: &SocketAddr, - outlet_addr: &MultiAddr, + outlet_addresses: &[MultiAddr], alias: &str, privileged: bool, ) -> TcpInlet { Self { bind_addr: *bind_addr, - outlet_addr: outlet_addr.clone(), + outlet_addresses: outlet_addresses.to_owned(), alias: alias.to_string(), privileged, } @@ -96,12 +96,12 @@ impl TcpInlet { self.bind_addr } - pub fn outlet_addr(&self) -> MultiAddr { - self.outlet_addr.clone() + pub fn outlet_addr(&self) -> &Vec { + &self.outlet_addresses } - pub fn alias(&self) -> String { - self.alias.clone() + pub fn alias(&self) -> &str { + &self.alias } pub fn privileged(&self) -> bool { diff --git a/implementations/rust/ockam/ockam_api/src/cli_state/storage/tcp_portals_repository_sql.rs b/implementations/rust/ockam/ockam_api/src/cli_state/storage/tcp_portals_repository_sql.rs index 3f532ac3072..6f6de8be76b 100644 --- a/implementations/rust/ockam/ockam_api/src/cli_state/storage/tcp_portals_repository_sql.rs +++ b/implementations/rust/ockam/ockam_api/src/cli_state/storage/tcp_portals_repository_sql.rs @@ -1,8 +1,8 @@ +use itertools::Itertools; +use sqlx::*; use std::net::SocketAddr; use std::str::FromStr; use std::sync::Arc; - -use sqlx::*; use tracing::debug; use crate::cli_state::storage::tcp_portals_repository::TcpPortalsRepository; @@ -63,7 +63,13 @@ 
impl TcpPortalsRepository for TcpPortalsSqlxDatabase { ) .bind(node_name) .bind(tcp_inlet.bind_addr().to_string()) - .bind(tcp_inlet.outlet_addr().to_string()) + .bind( + tcp_inlet + .outlet_addr() + .iter() + .map(|x| x.to_string()) + .join("//"), + ) .bind(tcp_inlet.alias()) .bind(tcp_inlet.privileged()); query.execute(&*self.database.pool).await.void()?; @@ -158,18 +164,26 @@ struct TcpInletRow { impl TcpInletRow { fn bind_addr(&self) -> Result { SocketAddr::from_str(&self.bind_addr) - .map_err(|e| ockam_core::Error::new(Origin::Api, Kind::Serialization, format!("{e:?}"))) + .map_err(|e| Error::new(Origin::Api, Kind::Serialization, format!("{e:?}"))) } - fn outlet_addr(&self) -> Result { - MultiAddr::from_str(&self.outlet_addr) - .map_err(|e| ockam_core::Error::new(Origin::Api, Kind::Serialization, format!("{e:?}"))) + fn outlet_addresses(&self) -> Result> { + let mut multiaddresses = Vec::new(); + + for addr in self.outlet_addr.split("//") { + multiaddresses.push( + MultiAddr::from_str(addr) + .map_err(|e| Error::new(Origin::Api, Kind::Serialization, format!("{e:?}")))?, + ); + } + + Ok(multiaddresses) } fn tcp_inlet(&self) -> Result { Ok(TcpInlet::new( &self.bind_addr()?, - &self.outlet_addr()?, + &self.outlet_addresses()?, &self.alias, self.privileged.to_bool(), )) @@ -212,7 +226,7 @@ mod tests { let tcp_inlet = TcpInlet::new( &SocketAddr::from_str("127.0.0.1:80").unwrap(), - &MultiAddr::from_str("/node/outlet").unwrap(), + &["/node/outlet1".parse()?, "/node/outlet2".parse()?], "alias", true, ); diff --git a/implementations/rust/ockam/ockam_api/src/cli_state/tcp_portals.rs b/implementations/rust/ockam/ockam_api/src/cli_state/tcp_portals.rs index 82d041ee0a9..f78841b83a2 100644 --- a/implementations/rust/ockam/ockam_api/src/cli_state/tcp_portals.rs +++ b/implementations/rust/ockam/ockam_api/src/cli_state/tcp_portals.rs @@ -15,11 +15,11 @@ impl CliState { &self, node_name: &str, bind_addr: &SocketAddr, - outlet_addr: &MultiAddr, + outlet_addresses: 
&[MultiAddr], alias: &str, privileged: bool, ) -> Result { - let tcp_inlet = TcpInlet::new(bind_addr, outlet_addr, alias, privileged); + let tcp_inlet = TcpInlet::new(bind_addr, outlet_addresses, alias, privileged); self.tcp_portals_repository() .store_tcp_inlet(node_name, &tcp_inlet) .await?; diff --git a/implementations/rust/ockam/ockam_api/src/control_api/backend/inlet.rs b/implementations/rust/ockam/ockam_api/src/control_api/backend/inlet.rs index e121fbc2988..5f9e5288b46 100644 --- a/implementations/rust/ockam/ockam_api/src/control_api/backend/inlet.rs +++ b/implementations/rust/ockam/ockam_api/src/control_api/backend/inlet.rs @@ -14,6 +14,7 @@ use ockam_core::Route; use ockam_multiaddr::MultiAddr; use ockam_node::Context; use std::sync::Arc; +use std::time::Duration; impl HttpControlNodeApiBackend { pub(super) async fn handle_tcp_inlet( @@ -35,7 +36,7 @@ impl HttpControlNodeApiBackend { }, Method::DELETE => match resource_id { None => ControlApiHttpResponse::missing_resource_id(ResourceKind::TcpInlets), - Some(id) => handle_tcp_inlet_delete(&self.node_manager, id).await, + Some(id) => handle_tcp_inlet_delete(context, &self.node_manager, id).await, }, _ => { warn!("Invalid method: {method}"); @@ -153,16 +154,30 @@ async fn handle_tcp_inlet_create( )?), }; + if request.to.is_empty() { + return ControlApiHttpResponse::bad_request("`to` must not be empty"); + } + + let to = { + let mut to = Vec::new(); + for address in request.to.iter() { + to.push(address.parse()?); + } + to + }; + let result = node_manager .create_inlet( context, request.from.try_into()?, Route::default(), Route::default(), - request.to.parse()?, + request.target_redundancy.unwrap_or(to.len() - 1), + to, request.name.unwrap_or_else(random_string), allow, None, + Some(Duration::from_millis(request.ping_timeout)), authorized, false, None, @@ -286,10 +301,11 @@ async fn handle_tcp_inlet_list( ) )] async fn handle_tcp_inlet_delete( + context: &Context, node_manager: &Arc, resource_id: &str, ) -> 
Result { - let result = node_manager.delete_inlet(resource_id).await; + let result = node_manager.delete_inlet(context, resource_id).await; match result { Ok(_) => Ok(ControlApiHttpResponse::without_body( StatusCode::NO_CONTENT, @@ -362,11 +378,13 @@ mod test { hostname: "127.0.0.1".to_string(), port: 0, }, - to: "/service/outlet".to_string(), + to: vec!["/service/outlet".to_string()], + target_redundancy: None, identity: None, authorized: None, allow: None, - retry_wait: 1000, + retry_wait: 1_000, + ping_timeout: 1_000, }) .unwrap(), ), @@ -384,8 +402,8 @@ mod test { let inlet_status: InletStatus = serde_json::from_slice(response.body.as_slice()).unwrap(); assert_eq!(inlet_status.name, "inlet-name"); assert_eq!(inlet_status.status, ConnectionStatus::Down); - assert_eq!(inlet_status.current_route, None); - assert_eq!(inlet_status.to, "/service/outlet"); + assert!(inlet_status.active_routes.is_empty()); + assert_eq!(inlet_status.to, vec!["/service/outlet"]); assert_eq!(inlet_status.bind_address.hostname, "127.0.0.1"); assert!(inlet_status.bind_address.port > 0); @@ -408,8 +426,8 @@ mod test { let inlet_status: InletStatus = serde_json::from_slice(response.body.as_slice()).unwrap(); assert_eq!(inlet_status.name, "inlet-name"); assert_eq!(inlet_status.status, ConnectionStatus::Up); - assert_eq!(inlet_status.current_route, Some("0#outlet".to_string())); - assert_eq!(inlet_status.to, "/service/outlet"); + assert_eq!(inlet_status.active_routes, vec!["0#outlet".to_string()]); + assert_eq!(inlet_status.to, vec!["/service/outlet"]); let request = ControlApiHttpRequest { method: "GET".to_string(), @@ -429,8 +447,8 @@ mod test { assert_eq!(inlets.len(), 1); assert_eq!(inlets[0].name, "inlet-name"); assert_eq!(inlets[0].status, ConnectionStatus::Up); - assert_eq!(inlets[0].current_route, Some("0#outlet".to_string())); - assert_eq!(inlets[0].to, "/service/outlet"); + assert_eq!(inlets[0].active_routes, vec!["0#outlet".to_string()]); + assert_eq!(inlets[0].to, 
vec!["/service/outlet"]); let request = ControlApiHttpRequest { method: "DELETE".to_string(), diff --git a/implementations/rust/ockam/ockam_api/src/control_api/protocol/inlet.rs b/implementations/rust/ockam/ockam_api/src/control_api/protocol/inlet.rs index 37b6124c374..aa10ee08c8c 100644 --- a/implementations/rust/ockam/ockam_api/src/control_api/protocol/inlet.rs +++ b/implementations/rust/ockam/ockam_api/src/control_api/protocol/inlet.rs @@ -13,6 +13,10 @@ fn retry_wait_default() -> u64 { 20000 } +fn ping_timeout_default() -> u64 { + 10_000 +} + #[derive(Debug, Serialize, Deserialize, Default, ToSchema)] #[serde(rename_all = "kebab-case")] pub enum InletKind { @@ -65,9 +69,13 @@ pub struct CreateInletRequest { #[serde(default = "tcp_inlet_default_bind_address")] #[schema(default = tcp_inlet_default_bind_address)] pub from: HostnamePort, - /// Multiaddress to a TCP Outlet + /// Multiaddresses to a TCP Outlet #[schema(example = "/project/default/service/forward_to_node1/secure/api/service/outlet")] - pub to: String, + pub to: Vec, + /// Target redundancy for the TCP Inlet routes; 0 means only one route is instantiated + /// When omitted, the number of provided Multiaddresses minus one applies + #[serde(default)] + pub target_redundancy: Option, /// Identity to be used to create the secure channel; /// When omitted, the node's identity will be used pub identity: Option, @@ -84,6 +92,11 @@ pub struct CreateInletRequest { #[serde(default = "retry_wait_default")] #[schema(default = retry_wait_default)] pub retry_wait: u64, + /// How long until the outlet route is considered disconnected; + /// In milliseconds + #[serde(default = "ping_timeout_default")] + #[schema(default = ping_timeout_default)] + pub ping_timeout: u64, } #[derive(Debug, Serialize, Deserialize, ToSchema)] #[serde(rename_all = "kebab-case")] @@ -103,24 +116,33 @@ pub struct InletStatus { pub status: ConnectionStatus, /// Bind address of the TCP Inlet pub bind_address: HostnamePort, - /// The current 
route of the TCP Inlet, populated only when the status is `up` - pub current_route: Option, - /// Multiaddress to the TCP Outlet - pub to: String, + /// The active route of the TCP Inlet, empty when the connection is down + pub active_routes: Vec, + /// The number of target redundant routes, 0 means only one route is instantiated + pub target_redundancy: usize, + /// Multiaddresses to the TCP Outlet + pub to: Vec, } -impl TryFrom for InletStatus { +impl TryFrom for InletStatus { type Error = ockam_core::Error; - fn try_from(status: crate::nodes::models::portal::InletStatus) -> Result { - let bind_address = HostnamePort::try_from(status.bind_addr.as_str())?; + fn try_from( + status: crate::nodes::models::portal::InletStatusView, + ) -> Result { + let bind_address = HostnamePort::try_from(status.bind_address.as_str())?; Ok(InletStatus { - status: status.status.into(), + status: status.connection.into(), bind_address, name: status.alias, - current_route: status.outlet_route.map(|r| r.to_string()), - to: status.outlet_addr, + active_routes: status + .outlet_routes + .into_iter() + .map(|r| r.to_string()) + .collect(), + target_redundancy: status.target_redundancy, + to: status.outlet_addresses, }) } } diff --git a/implementations/rust/ockam/ockam_api/src/influxdb/portal.rs b/implementations/rust/ockam/ockam_api/src/influxdb/portal.rs index ed26fdaad5b..4e2b0ef2fae 100644 --- a/implementations/rust/ockam/ockam_api/src/influxdb/portal.rs +++ b/implementations/rust/ockam/ockam_api/src/influxdb/portal.rs @@ -2,7 +2,7 @@ use crate::influxdb::gateway::interceptor::HttpAuthInterceptorFactory; use crate::influxdb::gateway::token_lease_refresher::TokenLeaseRefresher; use crate::influxdb::{LeaseUsage, StartInfluxDBLeaseIssuerRequest}; use crate::nodes::models::portal::{ - CreateInlet, CreateOutlet, InletStatus, OutletAccessControl, OutletStatus, + CreateInlet, CreateOutlet, InletStatusView, OutletAccessControl, OutletStatus, }; use 
crate::nodes::service::tcp_inlets::create_inlet_payload; use crate::nodes::{BackgroundNodeClient, NodeManagerWorker}; @@ -112,13 +112,15 @@ impl NodeManagerWorker { &self, ctx: &Context, body: CreateInfluxDBInlet, - ) -> Result, Response> { + ) -> Result, Response> { let CreateInlet { listen_addr, - outlet_addr, + target_redundancy, + outlet_addresses, alias, authorized, - wait_for_outlet_duration, + ping_timeout, + wait_for_outlet, policy_expression, wait_connection, secure_channel_identifier, @@ -132,7 +134,13 @@ impl NodeManagerWorker { } = body.tcp_inlet.clone(); //TODO: should be an easier way to tweak the multiaddr - let mut issuer_route = outlet_addr.clone(); + let mut issuer_route = if let Some(first) = outlet_addresses.first() { + first.clone() + } else { + return Err(Response::bad_request_no_request( + "The outlet address is invalid", + )); + }; let outlet_addr_last_service = issuer_route .pop_back() .ok_or_else(|| Response::bad_request_no_request("The outlet address is invalid"))?; @@ -186,10 +194,12 @@ impl NodeManagerWorker { listen_addr, prefix_route, suffix_route, - outlet_addr, + target_redundancy, + outlet_addresses, alias, policy_expression, - wait_for_outlet_duration, + ping_timeout, + wait_for_outlet, authorized, wait_connection, secure_channel_identifier, @@ -305,10 +315,12 @@ pub trait InfluxDBPortals { &self, ctx: &Context, listen_addr: &HostnamePort, - outlet_addr: &MultiAddr, + target_redundancy: usize, + outlet_addr: Vec, alias: &str, authorized_identifier: &Option, policy_expression: &Option, + ping_timeout: Duration, wait_for_outlet_timeout: Duration, wait_connection: bool, secure_channel_identifier: &Option, @@ -317,7 +329,7 @@ pub trait InfluxDBPortals { tls_certificate_provider: &Option, lease_usage: LeaseUsage, lease_issuer_route: Option, - ) -> miette::Result>; + ) -> miette::Result>; #[allow(clippy::too_many_arguments)] async fn create_influxdb_outlet( @@ -360,10 +372,12 @@ impl InfluxDBPortals for BackgroundNodeClient { &self, 
ctx: &Context, listen_addr: &HostnamePort, - outlet_addr: &MultiAddr, + target_redundancy: usize, + outlet_addr: Vec, alias: &str, authorized_identifier: &Option, policy_expression: &Option, + ping_timeout: Duration, wait_for_outlet_timeout: Duration, wait_connection: bool, secure_channel_identifier: &Option, @@ -372,14 +386,16 @@ impl InfluxDBPortals for BackgroundNodeClient { tls_certificate_provider: &Option, lease_usage: LeaseUsage, lease_issuer_route: Option, - ) -> miette::Result> { + ) -> miette::Result> { let request = { let inlet_payload = create_inlet_payload( listen_addr, + target_redundancy, outlet_addr, alias, authorized_identifier, policy_expression, + ping_timeout, wait_for_outlet_timeout, wait_connection, secure_channel_identifier, diff --git a/implementations/rust/ockam/ockam_api/src/kafka/inlet_controller.rs b/implementations/rust/ockam/ockam_api/src/kafka/inlet_controller.rs index d611ef5fef8..5cf289d24eb 100644 --- a/implementations/rust/ockam/ockam_api/src/kafka/inlet_controller.rs +++ b/implementations/rust/ockam/ockam_api/src/kafka/inlet_controller.rs @@ -130,11 +130,13 @@ impl KafkaInletController { inlet_bind_address.clone(), inner.local_interceptor_route.clone(), inner.remote_interceptor_route.clone() + kafka_outlet_address(broker_id), - inner.outlet_node_multiaddr.clone(), + 0, + vec![inner.outlet_node_multiaddr.clone()], format!("kafka-inlet-{}", random_string()), self.policy_expression.clone(), None, None, + None, false, None, false, diff --git a/implementations/rust/ockam/ockam_api/src/nodes/connection/mod.rs b/implementations/rust/ockam/ockam_api/src/nodes/connection/mod.rs index faac368da29..9f8d5b98681 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/connection/mod.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/connection/mod.rs @@ -262,7 +262,7 @@ impl ConnectionBuilder { if self.current_multiaddr.len() > length { while start < self.current_multiaddr.len() - length { - if self.current_multiaddr.matches(start, 
&codes) { + if self.current_multiaddr.matches_at(start, &codes) { // the transport route should include only the pieces before the match self.transport_route = self .recalculate_transport_route( diff --git a/implementations/rust/ockam/ockam_api/src/nodes/models/node.rs b/implementations/rust/ockam/ockam_api/src/nodes/models/node.rs index bd30cec5aad..5ffc8dfb950 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/models/node.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/models/node.rs @@ -2,7 +2,7 @@ use crate::cli_state::{NodeInfo, NodeProcessStatus}; use crate::colors::color_primary; -use crate::nodes::models::portal::{InletStatus, OutletStatus}; +use crate::nodes::models::portal::{InletStatusView, OutletStatus}; use crate::nodes::models::services::ServiceStatus; use crate::nodes::models::transport::TransportStatus; use crate::output::Output; @@ -76,7 +76,7 @@ pub struct NodeResources { #[n(6)] pub status_endpoint_address: Option, #[n(7)] pub transports: Vec, #[n(8)] pub secure_channel_listeners: Vec, - #[n(9)] pub inlets: Vec, + #[n(9)] pub inlets: Vec, #[n(10)] pub outlets: Vec, #[n(11)] pub services: Vec, } @@ -100,7 +100,7 @@ impl NodeResources { identity_name: String, transports: Vec, listeners: Vec, - inlets: Vec, + inlets: Vec, outlets: Vec, services: Vec, ) -> Result { diff --git a/implementations/rust/ockam/ockam_api/src/nodes/models/portal.rs b/implementations/rust/ockam/ockam_api/src/nodes/models/portal.rs index 7e57a05753a..0cdfc1481f2 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/models/portal.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/models/portal.rs @@ -1,15 +1,18 @@ //! 
Inlets and outlet request/response types use std::fmt::{Display, Formatter}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; use crate::colors::{color_primary, color_primary_alt}; use crate::error::ApiError; +use crate::nodes::registry::InletStateSummary; use crate::output::Output; use crate::session::connection_status::ConnectionStatus; use crate::terminal::fmt; -use crate::ReverseLocalConverter; +use crate::{fmt_list, ReverseLocalConverter}; +use colorful::Colorful; use minicbor::{CborLen, Decode, Encode}; use ockam::identity::Identifier; use ockam::transport::HostnamePort; @@ -32,39 +35,43 @@ pub struct CreateInlet { /// The peer address. /// This can either be the address of an already /// created outlet, or a forwarding mechanism via ockam cloud. - #[n(2)] pub(crate) outlet_addr: MultiAddr, + #[n(2)] pub(crate) outlet_addresses: Vec, + /// TCP Inlet route redundancy. 0 means only one route is instantiated. + #[n(3)] pub(crate) target_redundancy: usize, /// A human-friendly alias for this portal endpoint - #[b(3)] pub(crate) alias: String, + #[b(4)] pub(crate) alias: String, /// An authorised identity for secure channels. /// Only set for non-project addresses as for projects the project's /// authorised identity will be used. - #[n(4)] pub(crate) authorized: Option, + #[n(5)] pub(crate) authorized: Option, + /// How long until the outlet route is considered disconnected. + #[n(6)] pub(crate) ping_timeout: Option, /// The maximum duration to wait for an outlet to be available - #[n(5)] pub(crate) wait_for_outlet_duration: Option, + #[n(7)] pub(crate) wait_for_outlet: Option, /// The expression for the access control policy for this inlet. /// If not set, the policy set for the [TCP inlet resource type](ockam_abac::ResourceType::TcpInlet) /// will be used. 
- #[n(6)] pub(crate) policy_expression: Option, + #[n(8)] pub(crate) policy_expression: Option, /// Create the inlet and wait for the outlet to connect - #[n(7)] pub(crate) wait_connection: bool, + #[n(9)] pub(crate) wait_connection: bool, /// The identifier to be used to create the secure channel. /// If not set, the node's identifier will be used. - #[n(8)] pub(crate) secure_channel_identifier: Option, + #[n(10)] pub(crate) secure_channel_identifier: Option, /// Enable UDP NAT puncture. - #[n(9)] pub(crate) enable_udp_puncture: bool, + #[n(11)] pub(crate) enable_udp_puncture: bool, /// Disable fallback to TCP. /// TCP won't be used to transfer data between the Inlet and the Outlet. - #[n(11)] pub(crate) disable_tcp_fallback: bool, + #[n(12)] pub(crate) disable_tcp_fallback: bool, /// Use eBPF and RawSocket to access TCP packets instead of TCP data stream. - #[n(12)] pub(crate) privileged: bool, + #[n(13)] pub(crate) privileged: bool, /// TLS certificate provider route. - #[n(13)] pub(crate) tls_certificate_provider: Option, + #[n(14)] pub(crate) tls_certificate_provider: Option, /// Skip Portal handshake for lower latency, but also lower throughput - #[n(14)] pub(crate) skip_handshake: bool, + #[n(15)] pub(crate) skip_handshake: bool, /// Enable Nagle's algorithm for potentially higher throughput, but higher latency - #[n(15)] pub(crate) enable_nagle: bool, + #[n(16)] pub(crate) enable_nagle: bool, /// The prefix route to be used for interceptors. 
- #[n(16)] pub(crate) prefix_route: Route, + #[n(17)] pub(crate) prefix_route: Route, } impl Encodable for CreateInlet { @@ -81,40 +88,10 @@ impl Decodable for CreateInlet { impl CreateInlet { #[allow(clippy::too_many_arguments)] - pub fn via_project( - listen: HostnamePort, - to: MultiAddr, - alias: String, - wait_connection: bool, - enable_udp_puncture: bool, - disable_tcp_fallback: bool, - privileged: bool, - skip_handshake: bool, - enable_nagle: bool, - ) -> Self { - Self { - listen_addr: listen, - outlet_addr: to, - alias, - authorized: None, - wait_for_outlet_duration: None, - policy_expression: None, - wait_connection, - secure_channel_identifier: None, - enable_udp_puncture, - disable_tcp_fallback, - privileged, - tls_certificate_provider: None, - skip_handshake, - enable_nagle, - prefix_route: Default::default(), - } - } - - #[allow(clippy::too_many_arguments)] - pub fn to_node( + pub fn new( listen: HostnamePort, - to: MultiAddr, + target_redundancy: usize, + to: Vec, alias: String, auth: Option, wait_connection: bool, @@ -126,10 +103,12 @@ impl CreateInlet { ) -> Self { Self { listen_addr: listen, - outlet_addr: to, + outlet_addresses: to, + target_redundancy, alias, authorized: auth, - wait_for_outlet_duration: None, + ping_timeout: None, + wait_for_outlet: None, policy_expression: None, wait_connection, secure_channel_identifier: None, @@ -151,8 +130,12 @@ impl CreateInlet { self.prefix_route = route; } - pub fn set_wait_ms(&mut self, ms: u64) { - self.wait_for_outlet_duration = Some(Duration::from_millis(ms)) + pub fn set_ping_timeout(&mut self, ping_timeout: Duration) { + self.ping_timeout = Some(ping_timeout) + } + + pub fn set_wait_ms(&mut self, wait_for_outlet_duration: Duration) { + self.wait_for_outlet = Some(wait_for_outlet_duration); } pub fn set_policy_expression(&mut self, expression: PolicyExpression) { @@ -162,26 +145,6 @@ impl CreateInlet { pub fn set_secure_channel_identifier(&mut self, identifier: Identifier) { 
self.secure_channel_identifier = Some(identifier); } - - pub fn listen_addr(&self) -> HostnamePort { - self.listen_addr.clone() - } - - pub fn outlet_addr(&self) -> &MultiAddr { - &self.outlet_addr - } - - pub fn authorized(&self) -> Option { - self.authorized.clone() - } - - pub fn alias(&self) -> String { - self.alias.clone() - } - - pub fn wait_for_outlet_duration(&self) -> Option { - self.wait_for_outlet_duration - } } /// Request body to create an outlet @@ -253,83 +216,78 @@ impl CreateOutlet { #[derive(Clone, Debug, Encode, Decode, CborLen, Serialize, Message)] #[rustfmt::skip] #[cbor(map)] -pub struct InletStatus { - #[n(1)] pub bind_addr: String, - #[n(2)] pub worker_addr: Option, - #[n(3)] pub alias: String, - /// An optional status payload - #[n(4)] pub payload: Option, - #[n(5)] pub outlet_route: Option, - #[n(6)] pub status: ConnectionStatus, - #[n(7)] pub outlet_addr: String, - #[n(8)] pub privileged: bool, +pub struct InletStatusView { + #[n(1)] pub bind_address: String, + #[n(2)] pub alias: String, + #[n(3)] pub outlet_addresses: Vec, + #[n(4)] pub privileged: bool, + /// Status of the inlet, up if at least one session is up + #[n(5)] pub connection: ConnectionStatus, + #[n(6)] pub outlet_routes: Vec, + #[n(7)] pub target_redundancy: usize, } -impl Encodable for InletStatus { +impl Encodable for InletStatusView { fn encode(self) -> ockam_core::Result { cbor_encode_preallocate(self) } } -impl Decodable for InletStatus { +impl Decodable for InletStatusView { fn decode(e: &[u8]) -> ockam_core::Result { Ok(minicbor::decode(e)?) 
} } -impl InletStatus { +impl InletStatusView { #[allow(clippy::too_many_arguments)] - pub fn new( - bind_addr: impl Into, - worker_addr: impl Into>, + pub(crate) fn new( + bind_address: SocketAddr, alias: impl Into, - payload: impl Into>, - outlet_route: impl Into>, - status: ConnectionStatus, - outlet_addr: impl Into, + outlet_addresses: &[MultiAddr], privileged: bool, + summary: InletStateSummary, ) -> Self { Self { - bind_addr: bind_addr.into(), - worker_addr: worker_addr.into(), + bind_address: bind_address.to_string(), alias: alias.into(), - payload: payload.into(), - outlet_route: outlet_route.into(), - status, - outlet_addr: outlet_addr.into(), + outlet_routes: summary + .active_routes + .iter() + .map(|s| s.route.to_string()) + .collect(), + target_redundancy: summary.target_redundancy, + connection: summary.connection_status(), + outlet_addresses: outlet_addresses.iter().map(|a| a.to_string()).collect(), privileged, } } } -impl Display for InletStatus { +impl Display for InletStatusView { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!( f, "Inlet {} at {} is {}", color_primary(&self.alias), - color_primary(&self.bind_addr), - self.status, + color_primary(&self.bind_address), + self.connection, )?; - if let Some(r) = self - .outlet_route - .as_ref() - .and_then(Route::parse) - .and_then(|r| ReverseLocalConverter::convert_route(&r).ok()) - { - writeln!( - f, - "{}With route to outlet {}", - fmt::INDENTATION, - color_primary(r.to_string()) - )?; + + if self.outlet_routes.is_empty() { + writeln!(f, "{}No routes to outlet", fmt::INDENTATION)?; + } else { + writeln!(f, "{}With routes to outlet:", fmt::INDENTATION)?; + for route in &self.outlet_routes { + writeln!(f, "{}", &fmt_list!("{route}"))?; + } } - writeln!( - f, - "{}Outlet Address: {}", - fmt::INDENTATION, - color_primary(&self.outlet_addr) - )?; + + writeln!(f, "{}Outlet Addresses:", fmt::INDENTATION,)?; + for address in &self.outlet_addresses { + writeln!(f, "{}", 
&fmt_list!("{address}"))?; + } + if self.privileged { writeln!( f, @@ -342,14 +300,14 @@ impl Display for InletStatus { } } -impl Output for InletStatus { +impl Output for InletStatusView { fn item(&self) -> crate::Result { Ok(self.padded_display()) } } #[derive(Encode, Decode, CborLen, Debug, Default, Clone, Message)] -pub struct InletStatusList(#[n(0)] pub Vec); +pub struct InletStatusList(#[n(0)] pub Vec); impl Encodable for InletStatusList { fn encode(self) -> ockam_core::Result { diff --git a/implementations/rust/ockam/ockam_api/src/nodes/registry.rs b/implementations/rust/ockam/ockam_api/src/nodes/registry.rs index 93389bc0251..937646565bc 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/registry.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/registry.rs @@ -1,5 +1,5 @@ use crate::cli_state::random_name; -use crate::DefaultAddress; +use crate::{ConnectionStatus, DefaultAddress}; use ockam::identity::Identifier; use ockam::identity::{SecureChannel, SecureChannelListener}; @@ -11,9 +11,13 @@ use ockam_multiaddr::MultiAddr; use ockam_node::compat::asynchronous::Mutex as AsyncMutex; use ockam_transport_core::HostnamePort; +use crate::session::replacer::{ActiveInletRoute, ReplacerOutputKind}; use crate::session::session::Session; +use ockam_node::Context; +use ockam_transport_tcp::TcpInlet; use std::fmt::Display; use std::hash::Hash; +use std::net::SocketAddr; use std::sync::Arc; #[derive(Default)] @@ -133,27 +137,80 @@ impl KafkaServiceInfo { } #[derive(Clone)] -pub(crate) struct InletInfo { - pub(crate) bind_addr: String, - pub(crate) outlet_addr: MultiAddr, - pub(crate) session: Arc>, +pub(crate) struct TcpInletHandle { + pub(crate) tcp_inlet: Arc, + pub(crate) outlet_addresses: Vec, + pub(crate) sessions: Arc>>, pub(crate) privileged: bool, } -impl InletInfo { +impl TcpInletHandle { pub(crate) fn new( - bind_addr: &str, - outlet_addr: MultiAddr, - session: Session, + tcp_inlet: Arc, + outlet_addresses: Vec, + sessions: Vec, privileged: 
bool, ) -> Self { Self { - bind_addr: bind_addr.to_owned(), - outlet_addr, - session: Arc::new(AsyncMutex::new(session)), + tcp_inlet, + outlet_addresses, + sessions: Arc::new(AsyncMutex::new(sessions)), privileged, } } + + pub(crate) fn bind_address(&self) -> SocketAddr { + self.tcp_inlet.socket_address() + } + + pub(crate) async fn stop(&self, context: &Context) -> ockam_core::Result<()> { + for session in self.sessions.lock().await.iter_mut() { + session.stop().await; + } + self.tcp_inlet.stop(context) + } + + /// Returns all available statuses, usually one per session, but could be fewer + /// if a session was recently added + pub(crate) async fn summary(&self) -> InletStateSummary { + let sessions = self.sessions.lock().await; + let states = sessions + .iter() + .flat_map(|s| { + s.last_outcome().map(|outcome| match outcome { + ReplacerOutputKind::Inlet(inlet_status) => inlet_status, + _ => panic!("Unexpected outcome"), + }) + }) + .collect(); + + // the number of sessions is the target redundancy + InletStateSummary::new(states, sessions.len()) + } +} + +/// An internal representation of the state of an Inlet across all sessions +#[derive(Default)] +pub(crate) struct InletStateSummary { + pub(crate) active_routes: Vec, + pub(crate) target_redundancy: usize, +} + +impl InletStateSummary { + pub(crate) fn new(statuses: Vec, target_redundancy: usize) -> Self { + Self { + active_routes: statuses, + target_redundancy, + } + } + + pub(crate) fn connection_status(&self) -> ConnectionStatus { + if self.active_routes.is_empty() { + ConnectionStatus::Down + } else { + ConnectionStatus::Up + } + } } #[derive(Clone)] @@ -194,7 +251,7 @@ pub(crate) struct Registry { pub(crate) hop_services: RegistryOf, pub(crate) http_headers_interceptors: RegistryOf, pub(crate) relays: RegistryOf, - pub(crate) inlets: RegistryOf, + pub(crate) inlets: RegistryOf, pub(crate) outlets: RegistryOf, pub(crate) influxdb_services: RegistryOf, // TODO: what should we persist here? 
} diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/in_memory_node.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/in_memory_node.rs index cfe721b6076..0712337a971 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/in_memory_node.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/in_memory_node.rs @@ -190,9 +190,11 @@ impl InMemoryNode { self } - pub async fn stop(&self, ctx: &Context) -> Result<()> { - for session in self.registry.inlets.values() { - session.session.lock().await.stop().await; + pub async fn stop(&self, context: &Context) -> Result<()> { + for (alias, inlet_info) in self.registry.inlets.entries() { + if let Err(error) = inlet_info.stop(context).await { + error!(%alias, %error, "Failed to stop inlet"); + } } for session in self.registry.relays.values() { @@ -200,7 +202,7 @@ impl InMemoryNode { } for addr in DefaultAddress::iter() { - let result = ctx.stop_address(&addr.into()); + let result = context.stop_address(&addr.into()); // when stopping we can safely ignore missing services if let Err(err) = result { if err.code().kind == Kind::NotFound { diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/kafka_services.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/kafka_services.rs index 2c2e112e0ea..fa369f0fac9 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/kafka_services.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/kafka_services.rs @@ -209,11 +209,13 @@ impl InMemoryNode { KAFKA_OUTLET_INTERCEPTOR_ADDRESS, KAFKA_OUTLET_BOOTSTRAP_ADDRESS ], - outlet_node_multiaddr, + 0, + vec![outlet_node_multiaddr], inlet_alias, inlet_policy_expression.clone(), None, None, + None, true, None, false, diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/relay.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/relay.rs index caaa28a26cf..ca255a330c3 100644 --- 
a/implementations/rust/ockam/ockam_api/src/nodes/service/relay.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/relay.rs @@ -169,7 +169,7 @@ impl NodeManager { authorized: authorized.clone(), }; - let mut session = Session::create(ctx, Arc::new(Mutex::new(replacer)), None)?; + let mut session = Session::create(ctx, Arc::new(Mutex::new(replacer)), None, None)?; let remote_relay_info = match return_timing { ReturnTiming::Immediately => None, diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/access_control.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/access_control.rs new file mode 100644 index 00000000000..a1a17357b4d --- /dev/null +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/access_control.rs @@ -0,0 +1,58 @@ +use crate::nodes::service::tcp_inlets::session_replacer::InletParameters; +use crate::nodes::NodeManager; +use crate::ApiError; +use ockam_abac::Action; +use ockam_core::{IncomingAccessControl, OutgoingAccessControl}; +use ockam_multiaddr::proto::Project; +use ockam_multiaddr::MultiAddr; +use ockam_node::Context; +use std::sync::Arc; + +pub(in crate::nodes::service::tcp_inlets) async fn inlet_access_control( + context: &Context, + node_manager: &NodeManager, + parameters: &InletParameters, + original_multi_addr: Option<&MultiAddr>, +) -> ockam_core::Result<( + Arc, + Arc, +)> { + let authority = { + if let Some(original_multi_addr) = original_multi_addr { + if let Some(p) = original_multi_addr.first() { + if let Some(p) = p.cast::() { + if let Ok(p) = node_manager + .cli_state + .projects() + .get_project_by_name(&p) + .await + { + Some( + p.authority_identifier() + .ok_or_else(|| ApiError::core("no authority identifier"))?, + ) + } else { + None + } + } else { + None + } + } else { + None + } + } else { + None + } + } + .or(node_manager.project_authority()); + + node_manager + .access_control( + context, + authority, + parameters.resource.clone(), + 
Action::HandleMessage, + parameters.policy_expression.clone(), + ) + .await +} diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/background_node_client.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/background_node_client.rs index 35a98d34f42..99452f7d05a 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/background_node_client.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/background_node_client.rs @@ -2,23 +2,24 @@ use ockam::identity::Identifier; use ockam_abac::PolicyExpression; use ockam_core::api::{Reply, Request}; use ockam_core::{async_trait, Route}; -use ockam_multiaddr::proto::Project as ProjectProto; -use ockam_multiaddr::{MultiAddr, Protocol}; +use ockam_multiaddr::MultiAddr; use ockam_node::Context; use ockam_transport_core::HostnamePort; use std::time::Duration; -use crate::nodes::models::portal::{CreateInlet, InletStatus}; +use crate::nodes::models::portal::{CreateInlet, InletStatusView}; use crate::nodes::service::tcp_inlets::Inlets; use crate::nodes::BackgroundNodeClient; #[allow(clippy::too_many_arguments)] pub fn create_inlet_payload( listen_addr: &HostnamePort, - outlet_addr: &MultiAddr, + target_redundancy: usize, + outlet_addresses: Vec, alias: &str, authorized_identifier: &Option, policy_expression: &Option, + ping_timeout: Duration, wait_for_outlet_timeout: Duration, wait_connection: bool, secure_channel_identifier: &Option, @@ -30,33 +31,19 @@ pub fn create_inlet_payload( enable_nagle: bool, prefix_route: Route, ) -> CreateInlet { - let via_project = outlet_addr.matches(0, &[ProjectProto::CODE.into()]); - let mut payload = if via_project { - CreateInlet::via_project( - listen_addr.clone(), - outlet_addr.clone(), - alias.into(), - wait_connection, - enable_udp_puncture, - disable_tcp_fallback, - privileged, - skip_handshake, - enable_nagle, - ) - } else { - CreateInlet::to_node( - listen_addr.clone(), - outlet_addr.clone(), - 
alias.into(), - authorized_identifier.clone(), - wait_connection, - enable_udp_puncture, - disable_tcp_fallback, - privileged, - skip_handshake, - enable_nagle, - ) - }; + let mut payload = CreateInlet::new( + listen_addr.clone(), + target_redundancy, + outlet_addresses.clone(), + alias.into(), + authorized_identifier.clone(), + wait_connection, + enable_udp_puncture, + disable_tcp_fallback, + privileged, + skip_handshake, + enable_nagle, + ); if let Some(e) = policy_expression.as_ref() { payload.set_policy_expression(e.clone()) } @@ -66,8 +53,9 @@ pub fn create_inlet_payload( if let Some(tls_provider) = tls_certificate_provider { payload.set_tls_certificate_provider(tls_provider.clone()) } + payload.set_ping_timeout(ping_timeout); payload.set_prefix_route(prefix_route); - payload.set_wait_ms(wait_for_outlet_timeout.as_millis() as u64); + payload.set_wait_ms(wait_for_outlet_timeout); payload } @@ -77,10 +65,12 @@ impl Inlets for BackgroundNodeClient { &self, ctx: &Context, listen_addr: &HostnamePort, - outlet_addr: &MultiAddr, + target_redundancy: usize, + outlet_addresses: Vec, alias: &str, authorized_identifier: &Option, policy_expression: &Option, + ping_timeout: Duration, wait_for_outlet_timeout: Duration, wait_connection: bool, secure_channel_identifier: &Option, @@ -91,14 +81,16 @@ impl Inlets for BackgroundNodeClient { skip_handshake: bool, enable_nagle: bool, prefix_route: Route, - ) -> miette::Result> { + ) -> miette::Result> { let request = { let payload = create_inlet_payload( listen_addr, - outlet_addr, + target_redundancy, + outlet_addresses, alias, authorized_identifier, policy_expression, + ping_timeout, wait_for_outlet_timeout, wait_connection, secure_channel_identifier, @@ -115,7 +107,11 @@ impl Inlets for BackgroundNodeClient { self.ask_and_get_reply(ctx, request).await } - async fn show_inlet(&self, ctx: &Context, alias: &str) -> miette::Result> { + async fn show_inlet( + &self, + ctx: &Context, + alias: &str, + ) -> miette::Result> { let request 
= Request::get(format!("/node/inlet/{alias}")); self.ask_and_get_reply(ctx, request).await } diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/in_memory_node.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/in_memory_node.rs index a4bc8c7f611..597bf05aa15 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/in_memory_node.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/in_memory_node.rs @@ -7,7 +7,7 @@ use ockam_node::Context; use ockam_transport_core::HostnamePort; use std::time::Duration; -use crate::nodes::models::portal::InletStatus; +use crate::nodes::models::portal::InletStatusView; use crate::nodes::InMemoryNode; impl InMemoryNode { @@ -19,10 +19,12 @@ impl InMemoryNode { listen_addr: HostnamePort, prefix_route: Route, suffix_route: Route, - outlet_addr: MultiAddr, + target_redundancy: usize, + outlet_addresses: Vec, alias: String, policy_expression: Option, - wait_for_outlet_duration: Option, + ping_timeout: Option, + wait_for_outlet: Option, authorized: Option, wait_connection: bool, secure_channel_identifier: Option, @@ -32,17 +34,19 @@ impl InMemoryNode { tls_certificate_provider: Option, skip_handshake: bool, enable_nagle: bool, - ) -> Result { + ) -> Result { self.node_manager .create_inlet( ctx, - listen_addr.clone(), - prefix_route.clone(), - suffix_route.clone(), - outlet_addr.clone(), + listen_addr, + prefix_route, + suffix_route, + target_redundancy, + outlet_addresses, alias, policy_expression, - wait_for_outlet_duration, + ping_timeout, + wait_for_outlet, authorized, wait_connection, secure_channel_identifier, diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/inlets_trait.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/inlets_trait.rs index 5a9724f0a7d..dc34fdaf00f 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/inlets_trait.rs +++ 
b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/inlets_trait.rs @@ -7,7 +7,7 @@ use ockam_node::Context; use ockam_transport_core::HostnamePort; use std::time::Duration; -use crate::nodes::models::portal::InletStatus; +use crate::nodes::models::portal::InletStatusView; #[async_trait] pub trait Inlets { @@ -16,10 +16,12 @@ pub trait Inlets { &self, ctx: &Context, listen_addr: &HostnamePort, - outlet_addr: &MultiAddr, + target_redundancy: usize, + outlet_addresses: Vec, alias: &str, authorized_identifier: &Option, policy_expression: &Option, + ping_timeout: Duration, wait_for_outlet_timeout: Duration, wait_connection: bool, secure_channel_identifier: &Option, @@ -30,9 +32,13 @@ pub trait Inlets { skip_handshake: bool, enable_nagle: bool, prefix_route: Route, - ) -> miette::Result>; + ) -> miette::Result>; - async fn show_inlet(&self, ctx: &Context, alias: &str) -> miette::Result>; + async fn show_inlet( + &self, + ctx: &Context, + alias: &str, + ) -> miette::Result>; async fn delete_inlet(&self, ctx: &Context, inlet_alias: &str) -> miette::Result>; } diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/mod.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/mod.rs index a9f557894ce..358146dc5e0 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/mod.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/mod.rs @@ -1,9 +1,11 @@ +mod access_control; mod background_node_client; mod in_memory_node; mod inlets_trait; mod node_manager; mod node_manager_worker; mod session_replacer; +mod terminal_notifier; pub use inlets_trait::*; use session_replacer::*; diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/node_manager.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/node_manager.rs index bb8fd70c0f1..7200947cc64 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/node_manager.rs +++ 
b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/node_manager.rs @@ -2,6 +2,16 @@ use std::sync::Arc; use std::time::Duration; use crate::address::get_free_address_for; +use crate::nodes::models::portal::InletStatusView; +use crate::nodes::registry::{InletStateSummary, TcpInletHandle}; +use crate::nodes::service::tcp_inlets::access_control::inlet_access_control; +use crate::nodes::service::tcp_inlets::session_replacer::selector::OutletMultiAddrSelector; +use crate::nodes::service::tcp_inlets::session_replacer::InletParameters; +use crate::nodes::service::tcp_inlets::terminal_notifier::TcpInletNotifier; +use crate::nodes::service::tcp_inlets::InletSessionReplacer; +use crate::nodes::NodeManager; +use crate::session::replacer::MAX_CONNECT_TIME; +use crate::session::session::{AdditionalSessionOptions, Session}; use ockam::identity::Identifier; use ockam::Result; use ockam_abac::{PolicyExpression, Resource, ResourceType}; @@ -12,14 +22,6 @@ use ockam_node::compat::asynchronous::Mutex; use ockam_node::Context; use ockam_transport_core::HostnamePort; -use crate::nodes::models::portal::InletStatus; -use crate::nodes::registry::InletInfo; -use crate::nodes::service::tcp_inlets::InletSessionReplacer; -use crate::nodes::NodeManager; -use crate::session::connection_status::ConnectionStatus; -use crate::session::replacer::{ReplacerOutputKind, SessionReplacer, MAX_CONNECT_TIME}; -use crate::session::session::{AdditionalSessionOptions, Session}; - impl NodeManager { #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] @@ -29,9 +31,11 @@ impl NodeManager { listen_address: HostnamePort, prefix_route: Route, suffix_route: Route, - outlet_address: MultiAddr, + target_redundancy: usize, + outlet_addresses: Vec, alias: String, policy_expression: Option, + ping_timeout: Option, wait_for_outlet_duration: Option, authorized: Option, wait_connection: bool, @@ -43,12 +47,18 @@ impl NodeManager { tls_certificate_provider: Option, skip_handshake: bool, 
enable_nagle: bool, - ) -> Result { + ) -> Result { + let outlet_addresses_str = outlet_addresses + .iter() + .map(|a| a.to_string()) + .collect::>() + .join(", "); + debug! { %listen_address, prefix = %prefix_route, suffix = %suffix_route, - %outlet_address, + outlet_addresses = %outlet_addresses_str, %alias, %enable_udp_puncture, %disable_tcp_fallback, @@ -97,7 +107,7 @@ impl NodeManager { if registry .values() .iter() - .any(|inlet| inlet.bind_addr == listen_addr.to_string()) + .any(|inlet| inlet.bind_address() == listen_addr) { let message = format!("A TCP inlet with bind tcp address '{listen_addr}' already exists"); @@ -109,110 +119,131 @@ impl NodeManager { } } - let replacer = InletSessionReplacer { - node_manager: Arc::downgrade(self), - udp_transport, - context: ctx.try_clone()?, - listen_addr: listen_addr.to_string(), - outlet_addr: outlet_address.clone(), + let parameters = Arc::new(InletParameters { + terminal_notifier: TcpInletNotifier::new(Arc::downgrade(self), alias.clone()), + outlet_address_selector: OutletMultiAddrSelector::new(outlet_addresses.clone()), + wait_for_outlet_duration: wait_for_outlet_duration.unwrap_or(MAX_CONNECT_TIME), + resource: Resource::new(alias.clone(), ResourceType::TcpInlet), prefix_route, suffix_route, authorized, - wait_for_outlet_duration: wait_for_outlet_duration.unwrap_or(MAX_CONNECT_TIME), - resource: Resource::new(alias.clone(), ResourceType::TcpInlet), policy_expression, secure_channel_identifier, disable_tcp_fallback, tls_certificate_provider, - inlet: None, - connection: None, - main_route: None, - additional_secure_channel: None, - udp_puncture: None, - additional_route: None, - privileged, skip_handshake, enable_nagle, - }; + }); - let replacer = Arc::new(Mutex::new(replacer)); + let inlet = if privileged { + #[cfg(privileged_portals_support)] + { + let (incoming_access_control, outgoing_access_control) = + inlet_access_control(ctx, self, ¶meters, None).await?; - let main_replacer: Arc> = replacer.clone(); + // 
TODO: should options be dependent on the MultiAddr? + Arc::new( + self.tcp_transport + .create_privileged_inlet( + listen_addr, + incoming_access_control, + outgoing_access_control, + ) + .await?, + ) + } + #[cfg(not(privileged_portals_support))] + { + return Err(ockam_core::Error::new( + Origin::Node, + Kind::Internal, + "Privileged Portals support is not enabled", + )); + } + } else { + Arc::new(self.tcp_transport.crate_inlet_multi(listen_addr).await?) + }; + + let replacer = InletSessionReplacer { + node_manager: Arc::downgrade(self), + udp_transport, + context: ctx.try_clone()?, + inlet: inlet.clone(), + parameters, + status: None, + }; let _ = self .cli_state .create_tcp_inlet( &self.node_name, &listen_addr, - &outlet_address, + &outlet_addresses, &alias, privileged, ) .await?; - let additional_session_options = if enable_udp_puncture { - Some(AdditionalSessionOptions::create( - replacer.clone(), - !disable_tcp_fallback, - )) - } else { - None + let mut sessions = Vec::with_capacity(target_redundancy + 1); + for _ in 0..target_redundancy { + // we need replacers with independent status for each session + let replacer = Arc::new(Mutex::new(replacer.standalone()?)); + + let additional_session_options = if enable_udp_puncture { + Some(AdditionalSessionOptions::create( + replacer.clone(), + !disable_tcp_fallback, + ping_timeout, + )) + } else { + None + }; + + let mut session = + Session::create(ctx, replacer, additional_session_options, ping_timeout)?; + session.start_monitoring()?; + sessions.push(session); + } + + let mut first_session = { + let replacer = Arc::new(Mutex::new(replacer)); + + let additional_session_options = if enable_udp_puncture { + Some(AdditionalSessionOptions::create( + replacer.clone(), + !disable_tcp_fallback, + ping_timeout, + )) + } else { + None + }; + + Session::create(ctx, replacer, additional_session_options, ping_timeout)? 
}; - let mut session = Session::create(ctx, main_replacer, additional_session_options)?; - - let outcome = if wait_connection { - let result = session - .initial_connect() - .await - .map(|outcome| match outcome { - ReplacerOutputKind::Inlet(status) => status, - _ => { - panic!("Unexpected outcome: {:?}", outcome) - } - }); - - match result { - Ok(status) => Some(status), - Err(err) => { - warn!("Failed to create inlet: {err}"); - None - } + if wait_connection { + let result = first_session.initial_connect().await; + if let Err(error) = result { + warn!(%error, "Failed to connect to the outlet"); } - } else { - None }; - let connection_status = session.connection_status(); + first_session.start_monitoring()?; + sessions.push(first_session); - session.start_monitoring()?; + let inlet_info = TcpInletHandle::new(inlet, outlet_addresses.clone(), sessions, privileged); - self.registry.inlets.insert( - alias.clone(), - InletInfo::new( - &listen_addr.to_string(), - outlet_address.clone(), - session, - privileged, - ), - ); - - let tcp_inlet_status = InletStatus::new( - listen_addr.to_string(), - outcome - .clone() - .and_then(|s| s.worker.map(|address| address.address().to_string())), - &alias, - None, - outcome.clone().map(|s| s.route.to_string()), - connection_status, - outlet_address.to_string(), - privileged, - ); + // this summary already contains the connection status + let summary = inlet_info.summary().await; + + self.registry.inlets.insert(alias.clone(), inlet_info); + + let tcp_inlet_status = + InletStatusView::new(listen_addr, &alias, &outlet_addresses, privileged, summary); info! 
{ %listen_address, - %outlet_address, + outlet_addresses = %outlet_addresses_str, %alias, "inlet created" } @@ -220,24 +251,24 @@ impl NodeManager { Ok(tcp_inlet_status) } - pub async fn delete_inlet(&self, alias: &str) -> Result { + pub async fn delete_inlet(&self, context: &Context, alias: &str) -> Result { info!(%alias, "Handling request to delete inlet portal"); if let Some(inlet_to_delete) = self.registry.inlets.remove(alias) { debug!(%alias, "Successfully removed inlet from node registry"); - inlet_to_delete.session.lock().await.stop().await; + if let Err(error) = inlet_to_delete.stop(context).await { + error!(%alias, %error, "Failed to stop inlet"); + } + self.resources().delete_resource(&alias.into()).await?; self.cli_state .delete_tcp_inlet(&self.node_name, alias) .await?; - Ok(InletStatus::new( - inlet_to_delete.bind_addr, - None, + Ok(InletStatusView::new( + inlet_to_delete.bind_address(), alias, - None, - None, - ConnectionStatus::Down, - inlet_to_delete.outlet_addr.to_string(), + &inlet_to_delete.outlet_addresses, inlet_to_delete.privileged, + InletStateSummary::default(), )) } else { error!(%alias, "Inlet not found in the node registry"); @@ -250,96 +281,32 @@ impl NodeManager { } } - pub async fn show_inlet(&self, alias: &str) -> Option { + pub async fn show_inlet(&self, alias: &str) -> Option { info!(%alias, "Handling request to show inlet portal"); if let Some(inlet_info) = self.registry.inlets.get(alias) { - let session = inlet_info.session.lock().await; - let connection_status = session.connection_status(); - let outcome = session.last_outcome(); - drop(session); - if let Some(outcome) = outcome { - if let ReplacerOutputKind::Inlet(status) = outcome { - let address = match &status.worker { - Some(address) => address.address().to_string(), - None => "<>".to_string(), - }; - - Some(InletStatus::new( - inlet_info.bind_addr.to_string(), - address, - alias, - None, - status.route.to_string(), - connection_status, - inlet_info.outlet_addr.to_string(), 
- inlet_info.privileged, - )) - } else { - panic!("Unexpected outcome: {:?}", outcome) - } - } else { - Some(InletStatus::new( - inlet_info.bind_addr.to_string(), - None, - alias, - None, - None, - connection_status, - inlet_info.outlet_addr.to_string(), - inlet_info.privileged, - )) - } + Some(InletStatusView::new( + inlet_info.bind_address(), + alias, + &inlet_info.outlet_addresses, + inlet_info.privileged, + inlet_info.summary().await, + )) } else { error!(%alias, "Inlet not found in the node registry"); None } } - pub async fn list_inlets(&self) -> Vec { + pub async fn list_inlets(&self) -> Vec { let mut res = vec![]; for (alias, info) in self.registry.inlets.entries() { - let session = info.session.lock().await; - let connection_status = session.connection_status(); - let outcome = session.last_outcome(); - drop(session); - - let status = if let Some(outcome) = outcome { - match &outcome { - ReplacerOutputKind::Inlet(status) => { - let address = match &status.worker { - Some(address) => address.address().to_string(), - None => "<>".to_string(), - }; - - InletStatus::new( - &info.bind_addr, - address, - alias, - None, - status.route.to_string(), - connection_status, - info.outlet_addr.to_string(), - info.privileged, - ) - } - _ => { - panic!("Unexpected outcome: {:?}", outcome) - } - } - } else { - InletStatus::new( - &info.bind_addr, - None, - alias, - None, - None, - connection_status, - info.outlet_addr.to_string(), - info.privileged, - ) - }; - - res.push(status); + res.push(InletStatusView::new( + info.bind_address(), + alias, + &info.outlet_addresses, + info.privileged, + info.summary().await, + )); } res diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/node_manager_worker.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/node_manager_worker.rs index ce30fe500c9..cf54c6cfa87 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/node_manager_worker.rs +++ 
b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/node_manager_worker.rs @@ -2,7 +2,7 @@ use ockam::{route, Result}; use ockam_core::api::{Error, Response}; use ockam_node::Context; -use crate::nodes::models::portal::{CreateInlet, InletStatus, InletStatusList}; +use crate::nodes::models::portal::{CreateInlet, InletStatusList, InletStatusView}; use crate::nodes::NodeManagerWorker; impl NodeManagerWorker { @@ -16,13 +16,15 @@ impl NodeManagerWorker { &self, ctx: &Context, create_inlet: CreateInlet, - ) -> Result, Response> { + ) -> Result, Response> { let CreateInlet { listen_addr, - outlet_addr, + target_redundancy, + outlet_addresses, alias, authorized, - wait_for_outlet_duration, + ping_timeout, + wait_for_outlet, policy_expression, wait_connection, secure_channel_identifier, @@ -41,10 +43,12 @@ impl NodeManagerWorker { listen_addr, prefix_route, route![], - outlet_addr, + target_redundancy, + outlet_addresses, alias, policy_expression, - wait_for_outlet_duration, + ping_timeout, + wait_for_outlet, authorized, wait_connection, secure_channel_identifier, @@ -64,9 +68,10 @@ impl NodeManagerWorker { pub(crate) async fn delete_inlet( &self, + context: &Context, alias: &str, - ) -> Result, Response> { - match self.node_manager.delete_inlet(alias).await { + ) -> Result, Response> { + match self.node_manager.delete_inlet(context, alias).await { Ok(status) => Ok(Response::ok().body(status)), Err(e) => Err(Response::bad_request_no_request(&format!("{e:?}"))), } @@ -75,7 +80,7 @@ impl NodeManagerWorker { pub(crate) async fn show_inlet( &self, alias: &str, - ) -> Result, Response> { + ) -> Result, Response> { match self.node_manager.show_inlet(alias).await { Some(inlet) => Ok(Response::ok().body(inlet)), None => Err(Response::not_found_no_request(&format!( diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer.rs deleted file mode 
100644 index c118405b292..00000000000 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer.rs +++ /dev/null @@ -1,457 +0,0 @@ -use ockam_transport_tcp::new_certificate_provider_cache; -use std::sync::{Arc, Weak}; -use std::time::Duration; - -use colorful::Colorful; -use tokio::time::timeout; - -use ockam::identity::{Identifier, SecureChannel}; -use ockam::tcp::TcpInletOptions; -use ockam::udp::{UdpPuncture, UdpPunctureNegotiation, UdpTransport}; -use ockam::Result; -use ockam_abac::{Action, PolicyExpression, Resource}; -use ockam_core::errcode::{Kind, Origin}; -use ockam_core::{async_trait, route, Error, IncomingAccessControl, OutgoingAccessControl, Route}; -use ockam_multiaddr::proto::Project as ProjectProto; -use ockam_multiaddr::MultiAddr; -use ockam_node::Context; -use ockam_transport_tcp::TcpInlet; - -use crate::colors::color_primary; -use crate::error::ApiError; -use crate::nodes::connection::Connection; -use crate::nodes::service::certificate_provider::ProjectCertificateProvider; -use crate::nodes::service::SecureChannelType; -use crate::nodes::NodeManager; -use crate::session::replacer::{ - AdditionalSessionReplacer, CurrentInletStatus, ReplacerOutcome, ReplacerOutputKind, - SessionReplacer, MAX_RECOVERY_TIME, -}; -use crate::{fmt_info, fmt_ok, fmt_warn, DefaultAddress}; - -pub(super) struct InletSessionReplacer { - pub(super) node_manager: Weak, - pub(super) udp_transport: Option>, - pub(super) context: Context, - pub(super) listen_addr: String, - pub(super) outlet_addr: MultiAddr, - pub(super) prefix_route: Route, - pub(super) suffix_route: Route, - pub(super) authorized: Option, - pub(super) wait_for_outlet_duration: Duration, - pub(super) resource: Resource, - pub(super) policy_expression: Option, - pub(super) secure_channel_identifier: Option, - pub(super) disable_tcp_fallback: bool, - pub(super) tls_certificate_provider: Option, - - // current status - pub(super) inlet: Option>, - pub(super) main_route: Option, - - 
pub(super) connection: Option, - - pub(super) additional_secure_channel: Option, - pub(super) udp_puncture: Option, - pub(super) additional_route: Option, - pub(super) privileged: bool, - pub(super) skip_handshake: bool, - pub(super) enable_nagle: bool, -} - -impl InletSessionReplacer { - fn udp_puncture_enabled(&self) -> bool { - self.udp_transport.is_some() - } - - async fn access_control( - &self, - node_manager: &NodeManager, - ) -> Result<( - Arc, - Arc, - )> { - let authority = { - if let Some(p) = self.outlet_addr.first() { - if let Some(p) = p.cast::() { - if let Ok(p) = node_manager - .cli_state - .projects() - .get_project_by_name(&p) - .await - { - Some( - p.authority_identifier() - .ok_or_else(|| ApiError::core("no authority identifier"))?, - ) - } else { - None - } - } else { - None - } - } else { - None - } - } - .or(node_manager.project_authority()); - - node_manager - .access_control( - &self.context, - authority, - self.resource.clone(), - Action::HandleMessage, - self.policy_expression.clone(), - ) - .await - } - - async fn inlet_options(&self, node_manager: &NodeManager) -> Result { - let (incoming_ac, outgoing_ac) = self.access_control(node_manager).await?; - let options = TcpInletOptions::new() - .with_incoming_access_control(incoming_ac) - .with_outgoing_access_control(outgoing_ac) - .set_skip_handshake(self.skip_handshake) - .set_enable_nagle(self.enable_nagle); - - let options = if self.udp_puncture_enabled() && self.disable_tcp_fallback { - options.paused() - } else { - options - }; - - let options = if let Some(tls_provider) = &self.tls_certificate_provider { - options.with_tls_certificate_provider(new_certificate_provider_cache(Arc::new( - ProjectCertificateProvider::new(self.node_manager.clone(), tls_provider.clone()), - ))) - } else { - options - }; - - Ok(options) - } - - async fn create_impl(&mut self, node_manager: &NodeManager) -> Result { - self.pause_inlet(); - self.close_connection(node_manager); - - let connection = node_manager 
- .make_connection( - &self.context, - &self.outlet_addr, - self.secure_channel_identifier - .clone() - .unwrap_or(node_manager.identifier()), - self.authorized.clone(), - Some(self.wait_for_outlet_duration), - ) - .await?; - let connection = self.connection.insert(connection); - let connection_route = connection.route()?; - let transport_route = connection.transport_route(); - - //we expect a fully normalized MultiAddr - let normalized_route = - self.prefix_route.clone() + connection_route + self.suffix_route.clone(); - - // Drop the last address as it will be appended automatically under the hood - let normalized_stripped_route: Route = normalized_route.clone().modify().pop_back().into(); - - // Finally, attempt to create/update inlet using the new route - let inlet_address = match self.inlet.clone() { - Some(inlet) => { - inlet.unpause(&self.context, normalized_stripped_route.clone())?; - - inlet.processor_address().cloned() - } - None => { - let options = self.inlet_options(node_manager).await?; - let inlet = if self.privileged { - #[cfg(privileged_portals_support)] - { - node_manager - .tcp_transport - .create_privileged_inlet( - self.listen_addr.clone(), - normalized_route.clone(), - options, - ) - .await? - } - #[cfg(not(privileged_portals_support))] - { - return Err(ockam_core::Error::new( - Origin::Node, - Kind::Internal, - "Privileged Portals support is not enabled", - )); - } - } else { - node_manager - .tcp_transport - .create_inlet(self.listen_addr.clone(), normalized_route.clone(), options) - .await? 
- }; - - let inlet_address = inlet.processor_address().cloned(); - - let inlet = Arc::new(inlet); - self.inlet = Some(inlet); - - inlet_address - } - }; - - self.main_route = Some(normalized_stripped_route); - info!(address = ?inlet_address, - route = %self.main_route.as_ref().map(|r| r.to_string()).unwrap_or("None".to_string()), - "tcp inlet restored"); - - Ok(ReplacerOutcome { - ping_route: transport_route, - kind: ReplacerOutputKind::Inlet(CurrentInletStatus { - worker: inlet_address, - route: normalized_route, - }), - }) - } - - fn pause_inlet(&mut self) { - if let Some(inlet) = self.inlet.as_mut() { - inlet.pause(); - } - } - - fn close_inlet(&mut self) { - if let Some(inlet) = self.inlet.take() { - // The previous inlet worker needs to be stopped: - let result = inlet.stop(&self.context); - - if let Err(err) = result { - error!( - ?err, - "Failed to remove inlet with address {:?}", - inlet.processor_address() - ); - } - } - } - - fn close_connection(&mut self, node_manager: &NodeManager) { - if let Some(connection) = self.connection.take() { - let result = connection.close(&self.context, node_manager); - if let Err(err) = result { - error!(?err, "Failed to close connection"); - } - } - } -} - -#[async_trait] -impl SessionReplacer for InletSessionReplacer { - async fn create(&mut self) -> Result { - // The addressing scheme is very flexible. Typically, the node connects to - // the cloud via a secure channel and with another secure channel via - // relay to the actual outlet on the target node. However, it is also - // possible that there is just a single secure channel used to go directly - // to another node. - let node_manager = if let Some(node_manager) = self.node_manager.upgrade() { - node_manager - } else { - return Err(Error::new( - Origin::Node, - Kind::Cancelled, - "Node manager is dropped. Can't create the Inlet.", - )); - }; - - debug!(%self.outlet_addr, "creating new tcp inlet"); - - // The future is given some limited time to succeed. 
- // TODO: I believe that every operation inside should have a timeout on its own, the need - // of this timeout is questionable (given it's also not adjustable) - match timeout(MAX_RECOVERY_TIME, self.create_impl(&node_manager)).await { - Err(_) => { - warn!(%self.outlet_addr, "timeout creating new tcp inlet"); - Err(ApiError::core("timeout")) - } - Ok(Err(e)) => { - warn!(%self.outlet_addr, err = %e, "failed to create tcp inlet"); - Err(e) - } - Ok(Ok(route)) => Ok(route), - } - } - - async fn close(&mut self) { - self.main_route = None; - - let node_manager = if let Some(node_manager) = self.node_manager.upgrade() { - node_manager - } else { - warn!("An inlet close was issued after the NodeManager shut down, skipping."); - return; - }; - - self.close_inlet(); - self.close_connection(&node_manager); - } - - async fn on_session_down(&self) { - if let Some(node_manager) = self.node_manager.upgrade() { - node_manager.cli_state.notify_message( - fmt_warn!( - "The TCP Inlet {} listening at {} lost the connection to the TCP Outlet at {}\n", - color_primary(&self.resource.resource_name), - color_primary(&self.listen_addr), - color_primary(&self.outlet_addr) - ) + &fmt_info!("Attempting to reconnect...\n"), - ); - } - } - - async fn on_session_replaced(&self) { - if let Some(node_manager) = self.node_manager.upgrade() { - node_manager.cli_state.notify_message(fmt_ok!( - "The TCP Inlet {} listening at {} has restored the connection to the TCP Outlet at {}\n", - color_primary(&self.resource.resource_name), - color_primary(&self.listen_addr), - color_primary(&self.outlet_addr) - )); - } - } -} - -#[async_trait] -impl AdditionalSessionReplacer for InletSessionReplacer { - async fn create_additional(&mut self) -> Result { - let node_manager = if let Some(node_manager) = self.node_manager.upgrade() { - node_manager - } else { - return Err(Error::new( - Origin::Node, - Kind::Cancelled, - "Node manager is dropped. 
Can't start UDP puncture for an Inlet.", - )); - }; - - let udp_transport = self - .udp_transport - .as_ref() - .ok_or_else(|| { - Error::new( - Origin::Node, - Kind::Invalid, - "Couldn't create inlet with puncture", - ) - })? - .clone(); - - let main_route = if let Some(connection) = self.connection.as_ref() { - connection.route()? - } else { - return Err(Error::new( - Origin::Api, - Kind::Internal, - "Error while creating additional session. Connection is absent", - )); - }; - - let inlet = if let Some(inlet) = self.inlet.clone() { - inlet - } else { - return Err(Error::new( - Origin::Api, - Kind::Internal, - "Error while creating additional session. Inlet is absent", - )); - }; - - let main_route: Route = main_route.modify().pop_back().into(); - - let additional_sc_route = main_route.clone() + DefaultAddress::SECURE_CHANNEL_LISTENER; - - let additional_sc = node_manager - .create_secure_channel_internal( - &self.context, - additional_sc_route, - self.secure_channel_identifier - .as_ref() - .unwrap_or(&node_manager.identifier()), - self.authorized.clone().map(|authorized| vec![authorized]), - None, - // TODO: Have a dedicated timeout - Some(Duration::from_secs(10)), - SecureChannelType::KeyExchangeAndMessages, - ) - .await?; - let additional_sc = self.additional_secure_channel.insert(additional_sc); - - let rendezvous_route = route![ - DefaultAddress::get_rendezvous_server_address(), - DefaultAddress::RENDEZVOUS_SERVICE - ]; - - let puncture = UdpPunctureNegotiation::start_negotiation( - &self.context, - main_route + DefaultAddress::UDP_PUNCTURE_NEGOTIATION_LISTENER, - &udp_transport, - rendezvous_route, - // TODO: Have a dedicated timeout - Duration::from_secs(10), - ) - .await?; - let puncture = self.udp_puncture.insert(puncture); - - // TODO: Have a dedicated timeout duration - puncture.wait_for_puncture(Duration::from_secs(10)).await?; - - info!("Updating route to UDP"); - - additional_sc.update_remote_node_route(route![puncture.sender_address()])?; - - let 
new_route = route![additional_sc.clone()]; - inlet.unpause(&self.context, new_route.clone())?; - - self.additional_route = Some(new_route.clone()); - - Ok(new_route) - } - - async fn close_additional(&mut self, enable_fallback: bool) { - self.additional_route = None; - - if let Some(inlet) = self.inlet.as_ref() { - match self.main_route.as_ref() { - Some(main_route) if enable_fallback => { - // Switch Inlet to the main route - let res = inlet.unpause(&self.context, main_route.clone()); - - if let Some(err) = res.err() { - error!("Error switching Inlet to the main route {}", err); - } - } - _ => { - inlet.pause(); - } - } - } - - if let Some(secure_channel) = self.additional_secure_channel.take() { - let res = self.context.stop_address(secure_channel.as_ref()); - - if let Some(err) = res.err() { - error!("Error closing secure channel {}", err); - } - } - - if let Some(puncture) = self.udp_puncture.take() { - let res = puncture.stop(&self.context); - - if let Some(err) = res.err() { - error!("Error stopping puncture {}", err); - } - } - } -} diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer/mod.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer/mod.rs new file mode 100644 index 00000000000..9c0e0755127 --- /dev/null +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer/mod.rs @@ -0,0 +1,452 @@ +use ockam_transport_tcp::new_certificate_provider_cache; +use std::sync::{Arc, Weak}; +use std::time::Duration; + +use tokio::time::timeout; + +use crate::error::ApiError; +use crate::nodes::connection::Connection; +use crate::nodes::service::certificate_provider::ProjectCertificateProvider; +use crate::nodes::service::tcp_inlets::access_control::inlet_access_control; +use crate::nodes::service::tcp_inlets::terminal_notifier::TcpInletNotifier; +use crate::nodes::service::SecureChannelType; +use crate::nodes::NodeManager; +use crate::session::replacer::{ + 
ActiveInletRoute, AdditionalSessionReplacer, ReplacerOutcome, ReplacerOutputKind, + SessionReplacer, MAX_RECOVERY_TIME, +}; +use crate::DefaultAddress; +use ockam::identity::{Identifier, SecureChannel}; +use ockam::tcp::TcpInletOptions; +use ockam::udp::{UdpPuncture, UdpPunctureNegotiation, UdpTransport}; +use ockam::Result; +use ockam_abac::{PolicyExpression, Resource}; +use ockam_core::errcode::{Kind, Origin}; +use ockam_core::{async_trait, route, Error, Route, TryClone}; +use ockam_multiaddr::MultiAddr; +use ockam_node::Context; +use ockam_transport_tcp::TcpInlet; +use selector::OutletMultiAddrSelector; + +pub(super) mod selector; + +pub(super) struct InletParameters { + pub(super) terminal_notifier: TcpInletNotifier, + pub(super) outlet_address_selector: OutletMultiAddrSelector, + pub(super) prefix_route: Route, + pub(super) suffix_route: Route, + pub(super) authorized: Option, + pub(super) wait_for_outlet_duration: Duration, + pub(super) resource: Resource, + pub(super) policy_expression: Option, + pub(super) secure_channel_identifier: Option, + pub(super) disable_tcp_fallback: bool, + pub(super) tls_certificate_provider: Option, + pub(super) enable_nagle: bool, + pub(super) skip_handshake: bool, +} + +/// The status of the additional Inlet session. +pub(super) struct AdditionalInletSessionStatus { + pub(super) secure_channel: SecureChannel, + pub(super) udp_puncture: UdpPuncture, +} + +/// The status of the Inlet session. 
+pub(super) struct InletSessionStatus { + pub(super) main_route: Route, + pub(super) connection: Connection, + pub(super) last_route_key: String, + pub(super) additional: Option, + pub(super) original_multiaddr: MultiAddr, +} + +pub(super) struct InletSessionReplacer { + pub(super) context: Context, + pub(super) node_manager: Weak, + pub(super) inlet: Arc, + pub(super) udp_transport: Option>, + pub(super) status: Option, + pub(super) parameters: Arc, +} + +impl InletSessionReplacer { + /// Returns a standalone instance of the replacer, it shared the dependencies but not + /// the status + pub(super) fn standalone(&self) -> Result { + Ok(InletSessionReplacer { + context: self.context.try_clone()?, + node_manager: self.node_manager.clone(), + inlet: self.inlet.clone(), + udp_transport: self.udp_transport.clone(), + parameters: self.parameters.clone(), + status: None, + }) + } + + fn udp_puncture_enabled(&self) -> bool { + self.udp_transport.is_some() + } + + async fn inlet_options( + &self, + node_manager: &NodeManager, + original_multi_addr: &MultiAddr, + ) -> Result { + let (incoming_ac, outgoing_ac) = inlet_access_control( + &self.context, + node_manager, + &self.parameters, + Some(original_multi_addr), + ) + .await?; + + let options = TcpInletOptions::new() + .set_skip_handshake(self.parameters.skip_handshake) + .set_enable_nagle(self.parameters.enable_nagle) + .with_incoming_access_control(incoming_ac) + .with_outgoing_access_control(outgoing_ac); + + let options = if self.udp_puncture_enabled() && self.parameters.disable_tcp_fallback { + options.paused() + } else { + options + }; + + let options = if let Some(tls_provider) = &self.parameters.tls_certificate_provider { + options.with_tls_certificate_provider(new_certificate_provider_cache(Arc::new( + ProjectCertificateProvider::new(self.node_manager.clone(), tls_provider.clone()), + ))) + } else { + options + }; + + Ok(options) + } + + async fn create_impl(&mut self, node_manager: &NodeManager) -> Result { + 
self.close().await; + + let selected_outlet_addr = self + .parameters + .outlet_address_selector + .select( + &self.context, + &self.inlet, + &node_manager.cli_state.projects(), + ) + .await?; + + debug!( + "trying to connect to outlet using {} (derived from {})", + selected_outlet_addr.selected, selected_outlet_addr.original + ); + + let result = node_manager + .make_connection( + &self.context, + &selected_outlet_addr.selected, + self.parameters + .secure_channel_identifier + .clone() + .unwrap_or(node_manager.identifier()), + self.parameters + .authorized + .clone() + .or_else(|| selected_outlet_addr.expected_identifier.clone()), + Some(self.parameters.wait_for_outlet_duration), + ) + .await; + let connection = match result { + Ok(connection) => connection, + Err(error) => { + warn!(original = %selected_outlet_addr.original, selected=%selected_outlet_addr.selected, "failed to instantiate connection: {error:?}"); + return Err(error); + } + }; + + let connection_route = connection.route()?; + let transport_route = connection.transport_route(); + + //we expect a fully normalized MultiAddr + let normalized_route = self.parameters.prefix_route.clone() + + connection_route + + self.parameters.suffix_route.clone(); + + let options = self + .inlet_options(node_manager, &selected_outlet_addr.original) + .await?; + + let (original_multiaddr, route_key) = + selected_outlet_addr.confirm(&self.context, normalized_route.clone(), options)?; + + // Drop the last address as it will be appended automatically under the hood + let normalized_stripped_route: Route = normalized_route.clone().modify().pop_back().into(); + + let inlet_address = self.inlet.processor_address().cloned(); + + let main_route = normalized_stripped_route; + info!(address = ?inlet_address, route = %main_route, "tcp inlet restored"); + + // TODO: keep additional connection open? 
+ self.status = Some(InletSessionStatus { + main_route, + connection, + original_multiaddr, + last_route_key: route_key, + additional: None, + }); + + Ok(ReplacerOutcome { + ping_route: transport_route, + kind: ReplacerOutputKind::Inlet(ActiveInletRoute { + route: normalized_route, + }), + }) + } +} + +#[async_trait] +impl SessionReplacer for InletSessionReplacer { + async fn create(&mut self) -> Result { + // The addressing scheme is very flexible. Typically, the node connects to + // the cloud via a secure channel and with another secure channel via + // relay to the actual outlet on the target node. However, it is also + // possible that there is just a single secure channel used to go directly + // to another node. + let node_manager = if let Some(node_manager) = self.node_manager.upgrade() { + node_manager + } else { + return Err(Error::new( + Origin::Node, + Kind::Cancelled, + "Node manager is dropped. Can't create the Inlet.", + )); + }; + + debug!(%self.parameters.outlet_address_selector, "creating new tcp inlet"); + + // The future is given some limited time to succeed. 
+ // TODO: I believe that every operation inside should have a timeout on its own, the need + // of this timeout is questionable (given it's also not adjustable) + match timeout(MAX_RECOVERY_TIME, self.create_impl(&node_manager)).await { + Err(_) => { + warn!(outlet_multiaddresses = %self.parameters.outlet_address_selector, "timeout creating new tcp inlet"); + Err(ApiError::core("timeout")) + } + Ok(Err(e)) => { + warn!(outlet_multiaddresses = %self.parameters.outlet_address_selector, err = %e, "failed to create tcp inlet"); + Err(e) + } + Ok(Ok(route)) => Ok(route), + } + } + + async fn close(&mut self) { + if let Some(status) = self.status.take() { + self.inlet.remove_route(&status.last_route_key); + + let node_manager = if let Some(node_manager) = self.node_manager.upgrade() { + node_manager + } else { + warn!("An inlet close was issued after the NodeManager shut down, skipping."); + return; + }; + + let result = status.connection.close(&self.context, &node_manager); + if let Err(err) = result { + error!(?err, "Failed to close connection"); + } + } + } + + async fn on_session_down(&self) { + if let Some(status) = &self.status { + self.parameters + .terminal_notifier + .on_session_down(&status.original_multiaddr) + .await; + } + } + + async fn on_session_replaced(&self) { + if let Some(status) = &self.status { + self.parameters + .terminal_notifier + .on_session_replaced(&status.original_multiaddr) + .await; + } + } +} + +#[async_trait] +impl AdditionalSessionReplacer for InletSessionReplacer { + async fn create_additional(&mut self) -> Result { + let node_manager = if let Some(node_manager) = self.node_manager.upgrade() { + node_manager + } else { + return Err(Error::new( + Origin::Node, + Kind::Cancelled, + "Node manager is dropped. Can't start UDP puncture for an Inlet.", + )); + }; + + let udp_transport = self + .udp_transport + .as_ref() + .ok_or_else(|| { + Error::new( + Origin::Node, + Kind::Invalid, + "Couldn't create inlet with puncture", + ) + })? 
+ .clone(); + + let status = if let Some(status) = &mut self.status { + status + } else { + return Err(Error::new( + Origin::Api, + Kind::Internal, + "Error while creating additional session. Connection is absent", + )); + }; + + let transport_route = status.connection.transport_route(); + let additional_sc_route = transport_route.clone() + DefaultAddress::SECURE_CHANNEL_LISTENER; + + let additional_sc = node_manager + .create_secure_channel_internal( + &self.context, + additional_sc_route, + self.parameters + .secure_channel_identifier + .as_ref() + .unwrap_or(&node_manager.identifier()), + self.parameters + .authorized + .clone() + .map(|authorized| vec![authorized]), + None, + // TODO: Have a dedicated timeout + Some(Duration::from_secs(10)), + SecureChannelType::KeyExchangeAndMessages, + ) + .await?; + + let rendezvous_route = route![ + DefaultAddress::get_rendezvous_server_address(), + DefaultAddress::RENDEZVOUS_SERVICE + ]; + + let mut udp_puncture = UdpPunctureNegotiation::start_negotiation( + &self.context, + transport_route + DefaultAddress::UDP_PUNCTURE_NEGOTIATION_LISTENER, + &udp_transport, + rendezvous_route, + // TODO: Have a dedicated timeout + Duration::from_secs(10), + ) + .await?; + + // TODO: Have a dedicated timeout duration + udp_puncture + .wait_for_puncture(Duration::from_secs(10)) + .await?; + + info!("Updating route to UDP"); + + additional_sc.update_remote_node_route(route![udp_puncture.sender_address()])?; + + let additional_route = route![ + additional_sc.clone(), + status.connection.route()?.recipient()?.clone() + ]; + + status.additional = Some(AdditionalInletSessionStatus { + secure_channel: additional_sc, + udp_puncture, + }); + + // drop the mutable borrow + let status: &InletSessionStatus = self.status.as_ref().unwrap(); + + let options = self + .inlet_options(&node_manager, &status.original_multiaddr) + .await?; + + self.inlet.update_outlet_route_and_unpause( + &self.context, + &status.last_route_key, + additional_route.clone(), + 
options, + )?; + + Ok(additional_route) + } + + async fn close_additional(&mut self, enable_fallback: bool) { + let status = if let Some(status) = &mut self.status { + status + } else { + return; + }; + + let additional = if let Some(additional) = status.additional.take() { + additional + } else { + return; + }; + + if enable_fallback { + let node_manager = if let Some(node_manager) = self.node_manager.upgrade() { + node_manager + } else { + warn!("TCP Inlet fallback to the main route was requested after the NodeManager shut down, skipping."); + return; + }; + + // turn mutable borrow into immutable + let status: &InletSessionStatus = self.status.as_ref().unwrap(); + + let options = match self + .inlet_options(&node_manager, &status.original_multiaddr) + .await + { + Ok(options) => options, + Err(err) => { + error!("Error creating TCP Inlet fallback options {}", err); + return; + } + }; + + // Switch Inlet to the main route + let res = self.inlet.update_outlet_route_and_unpause( + &self.context, + &status.last_route_key, + status.main_route.clone(), + options, + ); + if let Some(err) = res.err() { + error!("Error switching Inlet to the main route {}", err); + } + } else { + // No main_route or no fallback + self.inlet.pause_route(&status.last_route_key); + } + + let res = self + .context + .stop_address(additional.secure_channel.as_ref()); + if let Some(err) = res.err() { + error!("Error closing secure channel {}", err); + } + + let res = additional.udp_puncture.stop(&self.context); + if let Some(err) = res.err() { + error!("Error stopping puncture {}", err); + } + } +} diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer/selector.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer/selector.rs new file mode 100644 index 00000000000..ff16bd66192 --- /dev/null +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/session_replacer/selector.rs @@ -0,0 +1,540 @@ +use 
crate::cli_state::projects::Projects; +use ockam::identity::Identifier; +use ockam_core::compat::rand::random_string; +use ockam_core::errcode::{Kind, Origin}; +use ockam_core::{Error, Route}; +use ockam_multiaddr::proto::{DnsAddr, Project}; +use ockam_multiaddr::{MultiAddr, Protocol}; +use ockam_node::Context; +use ockam_transport_tcp::{TcpInlet, TcpInletOptions}; +use rand::prelude::SliceRandom; +use std::fmt::Display; +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::Mutex as AsyncMutex; +use tokio::time::timeout; + +pub struct ReservedMultiAddr { + pub original: MultiAddr, + pub selected: MultiAddr, + pub route_key: String, + pub expected_identifier: Option, + inlet: Arc, + confirmed: bool, +} + +impl ReservedMultiAddr { + /// The connection was successfully instantiated, and the inlet was updated with the new route. + /// Returns the original MultiAddr and the route_key. + pub fn confirm( + mut self, + context: &Context, + route: Route, + options: TcpInletOptions, + ) -> ockam_core::Result<(MultiAddr, String)> { + self.inlet + .update_outlet_route_and_unpause(context, &self.route_key, route, options)?; + self.confirmed = true; + Ok((self.original.clone(), self.route_key.clone())) + } +} + +impl Drop for ReservedMultiAddr { + fn drop(&mut self) { + if !self.confirmed { + // If the connection fails, or other errors occur before being confirmed, the route is removed. + self.inlet.remove_route(&self.selected.to_string()); + } + } +} + +const CACHE_DURATION: Duration = Duration::from_secs(20); + +#[derive(Clone, Debug)] +struct Variant { + address: MultiAddr, + route_key: String, +} + +#[derive(Clone, Debug)] +struct CacheEntry { + original: MultiAddr, + variants: Vec, + // in case the original is a project, we need to enforce + // identity verification also when the MultiAddr is resolved + // into /ip4/... 
+ expected_identifier: Option, +} + +/// Cache for the resolved MultiAddr variants +#[derive(Clone, Debug)] +struct VariantsCache { + /// The list of original MultiAddr and the resolved MultiAddr variants + entries: Vec, + + /// Keep a list of previous variants to preserve the route_key mappings. + /// This is mainly for DNS records that may rotate over time. + previous_variants: Vec, + + /// The timestamp when the cache was created + timestamp: std::time::Instant, +} + +impl VariantsCache { + /// Returns true if the cache is still valid and has at least one variant available. + fn is_valid(&self) -> bool { + // TODO: use the lowest DNS TTL as the cache duration + self.timestamp.elapsed() <= CACHE_DURATION + && self.entries.iter().any(|entry| !entry.variants.is_empty()) + } + + /// Returns the route_key of the provided MultiAddr, if it exists. + /// The relative variant is also removed from the cache. + fn take_variant_route_key( + &mut self, + original: &MultiAddr, + variant: &MultiAddr, + ) -> Option { + self.entries + .iter_mut() + .find(|entry| &entry.original == original) + .and_then(|entry| { + let index = entry.variants.iter().position(|v| &v.address == variant); + index.map(|index| { + let variant = entry.variants.remove(index); + variant.route_key + }) + }) + .or_else(|| { + let index = self + .previous_variants + .iter() + .position(|v| &v.address == variant); + index.map(|index| { + let variant = self.previous_variants.remove(index); + variant.route_key + }) + }) + } + + /// Returns a list of every remaining (unused) MultiAddr variant with the relative route_key. + /// This is to keep the mapping consistent in case DNS records rotate. + pub(crate) fn take_unused_variants(self) -> Vec { + self.entries + .into_iter() + .flat_map(|entry| entry.variants) + .chain(self.previous_variants) + .collect() + } + + /// Selects a random MultiAddr from the cache, excluding the provided route_keys. + /// If all variants are exhausted, a random one is returned. 
+ fn select_one_random_except( + &self, + except: &[String], + ) -> Option<(&MultiAddr, Option, Variant)> { + let mut rng = rand::thread_rng(); + let variants: Vec<(&MultiAddr, &Option, &Variant)> = self + .entries + .iter() + .flat_map(|entry| { + entry + .variants + .iter() + .map(move |variant| (&entry.original, &entry.expected_identifier, variant)) + }) + .filter(|(_original, _identifier, variant)| !except.contains(&variant.route_key)) + .collect(); + + if variants.is_empty() { + // in case we exhausted all variants, we return a random one with a random route_key + let view: Vec<(&MultiAddr, &Option, &Variant)> = + self.entries + .iter() + .flat_map(|entry| { + entry.variants.iter().map(move |variant| { + (&entry.original, &entry.expected_identifier, variant) + }) + }) + .collect(); + + view.choose(&mut rng) + .copied() + .map(|(original, expected_identifier, variant)| { + ( + original, + expected_identifier.clone(), + Variant { + address: variant.address.clone(), + route_key: random_string(), + }, + ) + }) + } else { + variants + .choose(&mut rng) + .copied() + .map(|(original, expected_identifier, variant)| { + (original, expected_identifier.clone(), variant.clone()) + }) + } + } +} + +#[derive(Clone)] +pub(in super::super) struct OutletMultiAddrSelector { + pub(in super::super) outlet_addresses: Vec, + cache: Arc>>, +} + +impl OutletMultiAddrSelector { + pub fn new(outlet_addresses: Vec) -> Self { + Self { + outlet_addresses, + cache: Default::default(), + } + } + + /// Creates a new [VariantsCache] complete with all variants from the provided outlet addresses. + /// The previous cache must be provided, when present, to preserve the `route_key` mappings. 
+ async fn create_complete_cache( + &self, + projects: &Projects, + mut previous_cache: Option, + ) -> VariantsCache { + let timestamp = std::time::Instant::now(); + let mut cache_entries: Vec = Vec::with_capacity(self.outlet_addresses.len()); + + for original in &self.outlet_addresses { + let (multiaddr, project_identifier) = + match Self::convert_project_multiaddr(projects, original).await { + Ok(multiaddr) => multiaddr, + Err(error) => { + warn!("Skipping MultiAddr {} due an error: {}", original, error); + continue; + } + }; + + let multiaddrs = match Self::expand_dns_entries(&multiaddr).await { + Ok(multiaddrs) => multiaddrs, + Err(error) => { + warn!("Skipping MultiAddr {} due an error: {}", multiaddr, error); + continue; + } + }; + + cache_entries.push(CacheEntry { + original: original.clone(), + variants: multiaddrs + .into_iter() + .map(|variant| Variant { + route_key: previous_cache + .as_mut() + .and_then(|c| c.take_variant_route_key(original, &variant)) + .unwrap_or_else(random_string), + address: variant, + }) + .collect(), + expected_identifier: project_identifier, + }); + } + + let previous_variants = if let Some(previous_cache) = previous_cache { + previous_cache.take_unused_variants() + } else { + Vec::new() + }; + + VariantsCache { + entries: cache_entries, + timestamp, + previous_variants, + } + } + + /// Selects a MultiAddr from the list of outlet addresses and reserves the route to avoid + /// concurrent connections to the same outlet. 
+ pub(super) async fn select( + &self, + context: &Context, + inlet: &Arc, + projects: &Projects, + ) -> ockam_core::Result { + let mut guard = self.cache.lock().await; + let cache = { + match &*guard { + Some(cache) if cache.is_valid() => {} + Some(_cache) => { + let previous = guard.take(); + *guard = Some(self.create_complete_cache(projects, previous).await); + } + None => { + *guard = Some(self.create_complete_cache(projects, None).await); + } + }; + guard.as_ref().unwrap() + }; + + loop { + let route_keys = inlet.list_all_route_keys(); + let (original, expected_identifier, selected) = + match cache.select_one_random_except(&route_keys) { + Some(variant) => variant, + None => { + let delay = Duration::from_secs(15); + warn!( + "No available MultiAddr found for inlet {}, retrying in {} seconds", + inlet, + delay.as_secs() + ); + tokio::time::sleep(delay).await; + continue; + } + }; + + if inlet.reserve_route_key(context, selected.route_key.clone())? { + break Ok(ReservedMultiAddr { + inlet: inlet.clone(), + original: original.clone(), + selected: selected.address, + confirmed: false, + route_key: selected.route_key, + expected_identifier, + }); + } else { + trace!("Route already taken by another thread, retrying..."); + } + } + } + + /// Converts /project/ into its MultiAddr representation + async fn convert_project_multiaddr( + projects: &Projects, + multiaddr: &MultiAddr, + ) -> Result<(MultiAddr, Option), Error> { + // TODO: verify: /project/default gets resolved with the /service/api rather than /secure/api: it shouldn't work + if let Some((dnsaddr, position)) = multiaddr.find(&[Project::CODE.into()]) { + let project_name = dnsaddr.cast::().ok_or_else(|| { + Error::new( + Origin::Channel, + Kind::Invalid, + "invalid project name in outlet address", + ) + })?; + let project = projects.get_project_by_name(&project_name).await?; + + let (before, _) = multiaddr.split(position); + let (_, after) = multiaddr.split(position + 1); + + let mut combined = 
MultiAddr::new(before.registry().clone()); + combined.try_extend(before.iter())?; + combined.try_extend(project.project_multiaddr()?.into_iter())?; + combined.try_extend(after.iter())?; + + Ok((combined, project.project_identifier())) + } else { + Ok((multiaddr.clone(), None)) + } + } + + /// Converts /dnsaddr/ into a list of MultiAddr with resolved IP addresses. + /// Returns an error if resolution fails or empty + async fn expand_dns_entries(multiaddr: &MultiAddr) -> Result, Error> { + if let Some((dnsaddr, position)) = multiaddr.find(&[DnsAddr::CODE.into()]) { + let host: &str = &dnsaddr.cast::().ok_or_else(|| { + Error::new( + Origin::Channel, + Kind::Invalid, + "invalid DNS address in outlet address", + ) + })?; + + let resolved: Vec = timeout( + Duration::from_millis(1_000), + // tokio does require the port to be present even when it is not used + tokio::net::lookup_host(format!("{host}:0")), + ) + .await + .map_err(|_timeout| { + Error::new( + Origin::Channel, + Kind::Invalid, + "Timeout while resolving DNS address".to_string(), + ) + })? + .map_err(|error| { + Error::new( + Origin::Channel, + Kind::Invalid, + format!("Error while resolving DNS address: {error:?}"), + ) + })? 
+ .collect(); + + if resolved.is_empty() { + return Err(Error::new( + Origin::Channel, + Kind::Invalid, + "DNS address resolved to an empty list", + )); + } + + let mut entries = Vec::with_capacity(resolved.len()); + + for socket_address in resolved { + let (before, _) = multiaddr.split(position); + let (_, after) = multiaddr.split(position + 1); + + let mut combined = MultiAddr::new(before.registry().clone()); + let ip_multiaddr: MultiAddr = match socket_address { + SocketAddr::V4(address) => format!("/ip4/{}", address.ip()).parse()?, + SocketAddr::V6(address) => format!("/ip6/{}", address.ip()).parse()?, + }; + combined.try_extend(before.iter())?; + combined.try_extend(ip_multiaddr.into_iter())?; + combined.try_extend(after.iter())?; + + entries.push(combined); + } + + Ok(entries) + } else { + Ok(vec![multiaddr.clone()]) + } + } +} + +impl Display for OutletMultiAddrSelector { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + self.outlet_addresses + .iter() + .map(|addr| addr.to_string()) + .collect::>() + .join(", ") + ) + } +} + +#[cfg(test)] +mod test { + use super::OutletMultiAddrSelector; + use super::*; + use crate::orchestrator::project::models::ProjectModel; + use crate::test_utils::start_manager_for_tests; + + #[ockam::test] + async fn expand_project_and_dns(context: &mut Context) -> ockam::Result<()> { + let handler = start_manager_for_tests(context, None, None).await?; + + let multiaddresses: Vec = vec![ + "/project/default/service/outlet".parse()?, + "/secure/api/service/outlet".parse()?, + ]; + + let projects = handler.cli_state.projects(); + + projects + .store_project( + crate::orchestrator::project::Project::import(ProjectModel { + id: "id".to_string(), + name: "default".to_string(), + space_name: "space".to_string(), + access_route: "/dnsaddr/orchestrator.ockam.io/tcp/1234/service/api".to_string(), + space_id: "space-id".to_string(), + ..Default::default() + }) + .await?, + ) + .await?; + + let cache 
= OutletMultiAddrSelector::new(multiaddresses.clone()) + .create_complete_cache(&projects, None) + .await; + + assert_eq!(cache.entries.len(), 2); + + assert_eq!( + cache.entries[0].original.to_string(), + "/project/default/service/outlet" + ); + assert_eq!(cache.entries[0].variants.len(), 2); + let variants = &cache.entries[0].variants; + + let address = variants[0].address.to_string(); + assert!(address.starts_with("/ip4/")); + assert!(address.ends_with("/tcp/1234/service/api/service/outlet")); + + let address = variants[1].address.to_string(); + assert!(address.starts_with("/ip4/")); + assert!(address.ends_with("/tcp/1234/service/api/service/outlet")); + + assert_eq!( + cache.entries[1].original.to_string(), + "/secure/api/service/outlet" + ); + assert_eq!(cache.entries[1].variants.len(), 1); + assert_eq!( + cache.entries[1].variants[0].address.to_string(), + "/secure/api/service/outlet" + ); + + let mut previous_cache = cache; + let mut cache = OutletMultiAddrSelector::new(multiaddresses) + .create_complete_cache(&projects, Some(previous_cache.clone())) + .await; + + // sort both cache and previous cache to compare them + previous_cache.entries.iter_mut().for_each(|entry| { + entry.variants.sort_by(|a, b| a.route_key.cmp(&b.route_key)); + }); + + cache.entries.iter_mut().for_each(|entry| { + entry.variants.sort_by(|a, b| a.route_key.cmp(&b.route_key)); + }); + + // verify that the newer cache keeps the same route_keys + assert_eq!(cache.entries[0].variants.len(), 2); + let variants = &cache.entries[0].variants; + let previous_variants = &previous_cache.entries[0].variants; + assert_eq!(variants[0].route_key, previous_variants[0].route_key); + assert_eq!(variants[1].route_key, previous_variants[1].route_key); + + assert_eq!(cache.entries[1].variants.len(), 1); + assert_eq!( + cache.entries[1].variants[0].route_key, + previous_cache.entries[1].variants[0].route_key + ); + + Ok(()) + } + + #[ockam::test] + async fn verify_unused_route_key_is_kept(context: &mut 
Context) -> ockam::Result<()> { + let handler = start_manager_for_tests(context, None, None).await?; + + let projects = handler.cli_state.projects(); + + let cache = OutletMultiAddrSelector::new(vec!["/secure/api/service/outlet1".parse()?]) + .create_complete_cache(&projects, None) + .await; + + assert_eq!(cache.entries.len(), 1); + let route_key1 = cache.entries[0].variants[0].route_key.clone(); + + let cache = OutletMultiAddrSelector::new(vec!["/secure/api/service/outlet2".parse()?]) + .create_complete_cache(&projects, Some(cache)) + .await; + + let cache = OutletMultiAddrSelector::new(vec!["/secure/api/service/outlet1".parse()?]) + .create_complete_cache(&projects, Some(cache)) + .await; + + assert_eq!(route_key1, cache.entries[0].variants[0].route_key); + + Ok(()) + } +} diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/terminal_notifier.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/terminal_notifier.rs new file mode 100644 index 00000000000..4f9e1d89869 --- /dev/null +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/tcp_inlets/terminal_notifier.rs @@ -0,0 +1,63 @@ +use crate::colors::color_primary; +use crate::nodes::NodeManager; +use crate::{fmt_info, fmt_ok, fmt_warn}; +use colorful::Colorful; +use ockam_multiaddr::MultiAddr; +use std::sync::Weak; + +pub(super) struct TcpInletNotifier { + node_manager: Weak, + inlet_name: String, +} + +impl TcpInletNotifier { + pub(super) fn new(node_manager: Weak, inlet_name: String) -> Self { + Self { + node_manager, + inlet_name, + } + } + pub(super) async fn on_session_down(&self, outlet_address: &MultiAddr) { + if let Some(node_manager) = self.node_manager.upgrade() { + if let Some(inlet_handle) = node_manager.registry.inlets.get(&self.inlet_name) { + let message = fmt_warn!( + "The TCP Inlet {} listening at {} lost the connection to the TCP Outlet at {}\n", + color_primary(&self.inlet_name), + color_primary(inlet_handle.tcp_inlet.socket_address()), + 
color_primary(outlet_address) + ); + + let summary = inlet_handle.summary().await; + + let message = if summary.active_routes.is_empty() { + message + &fmt_info!("Every route to TCP Outlet is disconnected.\n",) + } else { + message + + &fmt_info!( + "There are still {} routes connected to the TCP Outlet\n", + summary.active_routes.len() + ) + }; + + node_manager + .cli_state + .notify_message(message + &fmt_info!("Attempting to reconnect...\n")); + } + } + } + + pub(super) async fn on_session_replaced(&self, outlet_address: &MultiAddr) { + if let Some(node_manager) = self.node_manager.upgrade() { + if let Some(inlet_handle) = node_manager.registry.inlets.get(&self.inlet_name) { + let message = fmt_ok!( + "The TCP Inlet {} listening at {} has restored the connection to the TCP Outlet at {}\n", + color_primary(&self.inlet_name), + color_primary(inlet_handle.tcp_inlet.socket_address()), + color_primary(outlet_address) + ); + + node_manager.cli_state.notify_message(message); + } + } + } +} diff --git a/implementations/rust/ockam/ockam_api/src/nodes/service/worker.rs b/implementations/rust/ockam/ockam_api/src/nodes/service/worker.rs index c4215a068e8..4d99437fb31 100644 --- a/implementations/rust/ockam/ockam_api/src/nodes/service/worker.rs +++ b/implementations/rust/ockam/ockam_api/src/nodes/service/worker.rs @@ -225,7 +225,7 @@ impl NodeManagerWorker { encode_response(&header, self.delete_outlet(&addr).await) } (Delete, ["node", "inlet", alias]) => { - encode_response(&header, self.delete_inlet(alias).await) + encode_response(&header, self.delete_inlet(ctx, alias).await) } (Delete, ["node", "portal"]) => todo!(), diff --git a/implementations/rust/ockam/ockam_api/src/session/replacer.rs b/implementations/rust/ockam/ockam_api/src/session/replacer.rs index 55fdccf643e..498296090b2 100644 --- a/implementations/rust/ockam/ockam_api/src/session/replacer.rs +++ b/implementations/rust/ockam/ockam_api/src/session/replacer.rs @@ -1,7 +1,7 @@ use std::time::Duration; use 
ockam::remote::RemoteRelayInfo; -use ockam_core::{async_trait, Address, Result, Route}; +use ockam_core::{async_trait, Result, Route}; //most sessions replacer are dependent on the node manager, if many session //fails concurrently, which is the common scenario we need extra time @@ -27,14 +27,14 @@ pub trait AdditionalSessionReplacer: Send + Sync + 'static { } #[derive(Debug, Clone)] -pub struct CurrentInletStatus { +pub struct ActiveInletRoute { pub route: Route, - pub worker: Option
, + // TODO: add an established timestamp, maybe some counters? } #[derive(Debug, Clone)] pub enum ReplacerOutputKind { - Inlet(CurrentInletStatus), + Inlet(ActiveInletRoute), Relay(RemoteRelayInfo), } diff --git a/implementations/rust/ockam/ockam_api/src/session/session.rs b/implementations/rust/ockam/ockam_api/src/session/session.rs index 8b9dccc444e..debf99bc86a 100644 --- a/implementations/rust/ockam/ockam_api/src/session/session.rs +++ b/implementations/rust/ockam/ockam_api/src/session/session.rs @@ -1,13 +1,13 @@ -use rand::random; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex as SyncMutex; - use crate::nodes::service::default_address::DefaultAddress; use crate::session::collector::Collector; use crate::session::connection_status::ConnectionStatus; use crate::session::ping::Ping; use crate::session::replacer::{AdditionalSessionReplacer, ReplacerOutputKind, SessionReplacer}; use crate::session::status::{Status, StatusInternal}; +use rand::random; +use std::ops::Div; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Mutex as SyncMutex; use ockam::LocalMessage; use ockam_core::compat::sync::Arc; @@ -22,7 +22,15 @@ use ockam_node::{tokio, WorkerBuilder}; const MAX_FAILURES: usize = 3; const RETRY_DELAY: Duration = Duration::from_secs(5); -const PING_INTERVAL: Duration = Duration::from_secs(10); +const DEFAULT_PING_INTERVAL: Duration = Duration::from_secs(10); + +fn calculate_ping_interval(ping_timeout: Option) -> Duration { + if let Some(ping_timeout) = ping_timeout { + ping_timeout.div(MAX_FAILURES as u32) + } else { + DEFAULT_PING_INTERVAL + } +} /// State that is accessed from multiple places/threads, therefore needs to be wrapper in Arc> #[derive(Clone)] @@ -63,6 +71,7 @@ struct AdditionalState { shared_state: AdditionalSharedState, } +#[derive(Clone)] pub struct AdditionalSessionOptions { replacer: Arc>, enable_fallback: bool, @@ -88,12 +97,13 @@ impl AdditionalSessionOptions { pub fn create( replacer: Arc>, 
enable_fallback: bool, + ping_timeout: Option, ) -> Self { Self { replacer, enable_fallback, retry_delay: RETRY_DELAY, - ping_interval: PING_INTERVAL, + ping_interval: calculate_ping_interval(ping_timeout), } } } @@ -152,13 +162,14 @@ impl Session { ctx: &Context, replacer: Arc>, additional_session_options: Option, + ping_timeout: Option, ) -> Result { Self::create_extended( ctx, replacer, additional_session_options, RETRY_DELAY, - PING_INTERVAL, + calculate_ping_interval(ping_timeout), ) } @@ -385,7 +396,6 @@ impl Session { /// Stop everything pub async fn stop(&mut self) { self.stop_additional().await; - self.stop_main().await; } diff --git a/implementations/rust/ockam/ockam_api/tests/common/session.rs b/implementations/rust/ockam/ockam_api/tests/common/session.rs index fbfbb644d52..78da9bf6bc2 100644 --- a/implementations/rust/ockam/ockam_api/tests/common/session.rs +++ b/implementations/rust/ockam/ockam_api/tests/common/session.rs @@ -1,7 +1,7 @@ use core::sync::atomic::{AtomicBool, Ordering}; -use ockam::{route, Address, Context}; +use ockam::{route, Context}; use ockam_api::session::replacer::{ - AdditionalSessionReplacer, CurrentInletStatus, ReplacerOutcome, ReplacerOutputKind, + ActiveInletRoute, AdditionalSessionReplacer, ReplacerOutcome, ReplacerOutputKind, SessionReplacer, }; use ockam_core::compat::sync::Arc; @@ -161,10 +161,7 @@ impl SessionReplacer for MockReplacer { Ok(ReplacerOutcome { ping_route: self.ping_route.clone(), - kind: ReplacerOutputKind::Inlet(CurrentInletStatus { - route: route![], - worker: Some(Address::from_string("echo")), - }), + kind: ReplacerOutputKind::Inlet(ActiveInletRoute { route: route![] }), }) } diff --git a/implementations/rust/ockam/ockam_api/tests/latency.rs b/implementations/rust/ockam/ockam_api/tests/latency.rs index 73836836c25..ccab0637989 100644 --- a/implementations/rust/ockam/ockam_api/tests/latency.rs +++ b/implementations/rust/ockam/ockam_api/tests/latency.rs @@ -155,13 +155,15 @@ pub fn 
measure_buffer_latency_two_nodes_portal() { HostnamePort::localhost(0), route![], route![], - second_node_listen_address + 0, + vec![second_node_listen_address .multi_addr()? - .concat(&MultiAddr::from_string("/secure/api/service/outlet")?)?, + .concat(&MultiAddr::from_string("/secure/api/service/outlet")?)?], "inlet_alias".to_string(), None, None, None, + None, true, None, false, @@ -174,7 +176,7 @@ pub fn measure_buffer_latency_two_nodes_portal() { .await?; // connect to inlet_status.bind_addr and send dummy payload - let mut socket = TcpStream::connect(inlet_status.bind_addr.clone()) + let mut socket = TcpStream::connect(inlet_status.bind_address.clone()) .await .unwrap(); @@ -253,13 +255,15 @@ pub fn measure_connection_latency_two_nodes_portal() { HostnamePort::new("127.0.0.1", 0)?, route![], route![], - second_node_listen_address + 0, + vec![second_node_listen_address .multi_addr()? - .concat(&MultiAddr::from_string("/secure/api/service/outlet")?)?, + .concat(&MultiAddr::from_string("/secure/api/service/outlet")?)?], "inlet_alias".to_string(), None, None, None, + None, true, None, false, @@ -275,7 +279,7 @@ pub fn measure_connection_latency_two_nodes_portal() { for _ in 0..1000 { // connect to inlet_status.bind_addr and send dummy payload - let mut socket = TcpStream::connect(inlet_status.bind_addr.clone()) + let mut socket = TcpStream::connect(inlet_status.bind_address.clone()) .await .unwrap(); diff --git a/implementations/rust/ockam/ockam_api/tests/portals.rs b/implementations/rust/ockam/ockam_api/tests/portals.rs index 80ebf6e1f6e..ac130a4f241 100644 --- a/implementations/rust/ockam/ockam_api/tests/portals.rs +++ b/implementations/rust/ockam/ockam_api/tests/portals.rs @@ -51,11 +51,13 @@ async fn inlet_outlet_local_successful(context: &mut Context) -> ockam::Result<( HostnamePort::localhost(0), route![], route![], - MultiAddr::from_str("/secure/api/service/outlet")?, + 0, + vec![MultiAddr::from_str("/secure/api/service/outlet")?], "alias".to_string(), 
None, None, None, + None, true, None, false, @@ -68,13 +70,16 @@ async fn inlet_outlet_local_successful(context: &mut Context) -> ockam::Result<( .await?; assert_eq!(inlet_status.alias, "alias"); - assert_eq!(inlet_status.status, ConnectionStatus::Up); - assert_eq!(inlet_status.outlet_addr, "/secure/api/service/outlet"); - assert_ne!(inlet_status.bind_addr, "127.0.0.1:0"); - assert!(inlet_status.outlet_route.is_some()); + assert_eq!(inlet_status.connection, ConnectionStatus::Up); + assert_eq!( + inlet_status.outlet_addresses, + vec!["/secure/api/service/outlet"], + ); + assert_ne!(inlet_status.bind_address, "127.0.0.1:0"); + assert!(!inlet_status.outlet_routes.is_empty()); // connect to inlet_status.bind_addr and send dummy payload - let mut socket = TcpStream::connect(inlet_status.bind_addr).await.unwrap(); + let mut socket = TcpStream::connect(inlet_status.bind_address).await.unwrap(); socket.write_all(b"hello").await.unwrap(); let mut buf = [0u8; 5]; @@ -131,13 +136,15 @@ fn portal_node_goes_down_reconnect() { HostnamePort::localhost(0), route![], route![], - second_node_listen_address + 0, + vec![second_node_listen_address .multi_addr()? 
- .concat(&MultiAddr::from_str("/secure/api/service/outlet")?)?, + .concat(&MultiAddr::from_str("/secure/api/service/outlet")?)?], "inlet_alias".to_string(), None, None, None, + None, true, None, false, @@ -150,7 +157,7 @@ fn portal_node_goes_down_reconnect() { .await?; // connect to inlet_status.bind_addr and send dummy payload - let mut socket = TcpStream::connect(inlet_status.bind_addr.clone()) + let mut socket = TcpStream::connect(inlet_status.bind_address.clone()) .await .unwrap(); socket.write_all(b"hello").await.unwrap(); @@ -168,7 +175,7 @@ fn portal_node_goes_down_reconnect() { .show_inlet("inlet_alias") .await .unwrap(); - if inlet_status.status == ConnectionStatus::Down { + if inlet_status.connection == ConnectionStatus::Down { break; } tokio::time::sleep(Duration::from_millis(5000)).await; @@ -203,13 +210,13 @@ fn portal_node_goes_down_reconnect() { .show_inlet("inlet_alias") .await .unwrap(); - if inlet_status.status == ConnectionStatus::Up { + if inlet_status.connection == ConnectionStatus::Up { break; } tokio::time::sleep(Duration::from_millis(5000)).await; } - let mut socket = TcpStream::connect(inlet_status.bind_addr).await.unwrap(); + let mut socket = TcpStream::connect(inlet_status.bind_address).await.unwrap(); socket.write_all(b"hello").await.unwrap(); let mut buf = [0u8; 5]; @@ -295,13 +302,15 @@ fn portal_low_bandwidth_connection_keep_working_for_60s() { HostnamePort::localhost(0), route![], route![], - InternetAddress::from(passthrough_server_handle.chosen_addr) + 0, + vec![InternetAddress::from(passthrough_server_handle.chosen_addr) .multi_addr()? 
- .concat(&MultiAddr::from_str("/secure/api/service/outlet")?)?, + .concat(&MultiAddr::from_str("/secure/api/service/outlet")?)?], "inlet_alias".to_string(), None, None, None, + None, true, None, false, @@ -317,7 +326,7 @@ fn portal_low_bandwidth_connection_keep_working_for_60s() { // connect to inlet_status.bind_addr and send dummy payload let mut buf = [0u8; 48 * 1024]; - let mut stream = TcpStream::connect(inlet_status.bind_addr.clone()) + let mut stream = TcpStream::connect(inlet_status.bind_address.clone()) .await .unwrap(); @@ -350,7 +359,7 @@ fn portal_low_bandwidth_connection_keep_working_for_60s() { .show_inlet("inlet_alias") .await .unwrap(); - assert_eq!(inlet_status.status, ConnectionStatus::Up); + assert_eq!(inlet_status.connection, ConnectionStatus::Up); tokio::time::sleep(Duration::from_millis(1000)).await; } @@ -413,13 +422,15 @@ fn portal_heavy_load_exchanged() { HostnamePort::localhost(0), route![], route![], - second_node_listen_address + 0, + vec![second_node_listen_address .multi_addr()? - .concat(&MultiAddr::from_str("/secure/api/service/outlet")?)?, + .concat(&MultiAddr::from_str("/secure/api/service/outlet")?)?], "inlet_alias".to_string(), None, None, None, + None, true, None, false, @@ -442,7 +453,7 @@ fn portal_heavy_load_exchanged() { payload }; - let stream = TcpStream::connect(inlet_status.bind_addr.clone()) + let stream = TcpStream::connect(inlet_status.bind_address.clone()) .await .unwrap(); @@ -570,13 +581,15 @@ fn test_portal_payload_transfer(outgoing_disruption: Disruption, incoming_disrup HostnamePort::localhost(0), route![], route![], - InternetAddress::from(passthrough_server_handle.chosen_addr) + 0, + vec![InternetAddress::from(passthrough_server_handle.chosen_addr) .multi_addr()? 
- .concat(&MultiAddr::from_str("/secure/api/service/outlet")?)?, + .concat(&MultiAddr::from_str("/secure/api/service/outlet")?)?], "inlet_alias".to_string(), None, None, None, + None, true, None, false, @@ -596,7 +609,7 @@ fn test_portal_payload_transfer(outgoing_disruption: Disruption, incoming_disrup rand::thread_rng().fill_bytes(&mut random_buffer); // connect to inlet_status.bind_addr and send dummy payload - let stream = TcpStream::connect(inlet_status.bind_addr.clone()) + let stream = TcpStream::connect(inlet_status.bind_address.clone()) .await .unwrap(); diff --git a/implementations/rust/ockam/ockam_app_lib/src/incoming_services/commands.rs b/implementations/rust/ockam/ockam_app_lib/src/incoming_services/commands.rs index dce4fbda297..0cbe5ea97b3 100644 --- a/implementations/rust/ockam/ockam_app_lib/src/incoming_services/commands.rs +++ b/implementations/rust/ockam/ockam_app_lib/src/incoming_services/commands.rs @@ -1,4 +1,3 @@ -use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -18,7 +17,6 @@ use ockam_api::nodes::service::tcp_inlets::Inlets; use ockam_api::ConnectionStatus; use ockam_core::api::Reply; use ockam_core::route; -use ockam_multiaddr::MultiAddr; use tracing::{debug, error, info, warn}; impl AppState { @@ -110,7 +108,7 @@ impl AppState { .show_inlet(&self.context(), service.inlet_name()) .await { - if inlet.status == ConnectionStatus::Up { + if inlet.connection == ConnectionStatus::Up { debug!(node = %inlet_node_name, alias = %inlet.alias, "TCP inlet is already up"); return true; } @@ -200,12 +198,16 @@ impl AppState { .create_inlet( &self.context(), &HostnamePort::from(bind_address), - &MultiAddr::from_str(&service.service_route(Some(project_name.as_str()))) - .into_diagnostic()?, + 0, + vec![service + .service_route(Some(project_name.as_str())) + .parse() + .into_diagnostic()?], &inlet_alias, &None, &Some(FullExpression(expr)), Duration::from_secs(5), + Duration::from_secs(5), true, &None, false, diff --git 
a/implementations/rust/ockam/ockam_command/src/influxdb/inlet/create.rs b/implementations/rust/ockam/ockam_command/src/influxdb/inlet/create.rs index 47a15a69382..28c9cc473ab 100644 --- a/implementations/rust/ockam/ockam_command/src/influxdb/inlet/create.rs +++ b/implementations/rust/ockam/ockam_command/src/influxdb/inlet/create.rs @@ -1,6 +1,6 @@ use crate::node::util::initialize_default_node; use crate::shared_args::OptionalTimeoutArg; -use crate::tcp::inlet::create::{tcp_inlet_default_from_addr, tcp_inlet_default_to_addr}; +use crate::tcp::inlet::create::{tcp_inlet_default_from_addr, tcp_inlet_default_to_addr_vec}; use crate::tcp::util::alias_parser; use crate::util::parsers::duration_parser; use crate::util::parsers::hostname_parser; @@ -19,11 +19,11 @@ use ockam_api::address::extract_address_value; use ockam_api::cli_state::random_name; use ockam_api::colors::color_primary; use ockam_api::influxdb::{InfluxDBPortals, LeaseUsage}; -use ockam_api::nodes::models::portal::InletStatus; +use ockam_api::nodes::models::portal::InletStatusView; use ockam_api::nodes::BackgroundNodeClient; use ockam_api::{fmt_info, fmt_log, fmt_ok, fmt_warn, CliState, ConnectionStatus}; use ockam_core::api::{Reply, Status}; -use ockam_multiaddr::{proto, MultiAddr, Protocol}; +use ockam_multiaddr::MultiAddr; use ockam_node::compat::asynchronous::resolve_peer; use std::str::FromStr; use std::time::Duration; @@ -59,8 +59,13 @@ pub struct CreateCommand { /// or just the name of the service as `outlet` or `/service/outlet`. /// If you are passing just the service name, consider using `--via` to specify the /// relay name (e.g. `ockam tcp-inlet create --to outlet --via myrelay`). 
- #[arg(long, display_order = 900, id = "ROUTE", default_value_t = tcp_inlet_default_to_addr())] - pub to: String, + #[arg(long, display_order = 900, id = "ROUTE", default_values_t = tcp_inlet_default_to_addr_vec())] + pub to: Vec, + + /// Target redundancy for the InfluxDB Inlet routes; 0 means only one route is instantiated. + /// When omitted, the number of provided `to` routes minus one applies. + #[arg(long)] + pub target_redundancy: Option, /// Name of the relay that this InfluxDB Inlet will use to connect to the InfluxDB Outlet. /// @@ -98,6 +103,10 @@ pub struct CreateCommand { #[arg(long, display_order = 900, id = "WAIT", default_value = "5s", value_parser = duration_parser)] pub connection_wait: Duration, + /// How long until the outlet route is considered disconnected. + #[arg(long, default_value = "5s", value_parser = duration_parser)] + pub ping_timeout: Duration, + /// Time to wait before retrying to connect to the InfluxDB Outlet. #[arg(long, display_order = 900, id = "RETRY", default_value = "20s", value_parser = duration_parser)] pub retry_wait: Duration, @@ -163,6 +172,9 @@ impl Command for CreateCommand { initialize_default_node(ctx, &opts).await?; let cmd = self.parse_args(&opts).await?; + // TODO: properly handle printing of multiple outlets + let to = cmd.to.join(", "); + let mut node = BackgroundNodeClient::create(ctx, &opts.state, &cmd.at).await?; cmd.timeout.timeout.map(|t| node.set_timeout_mut(t)); @@ -176,14 +188,16 @@ impl Command for CreateCommand { } loop { - let result: Reply = node + let result: Reply = node .create_influxdb_inlet( ctx, cmd.from.hostname_port(), - &cmd.to(), + cmd.target_redundancy(), + cmd.to(), cmd.name.as_ref().expect("The `name` argument should be set to its default value if not provided"), &cmd.authorized, &cmd.allow, + cmd.ping_timeout, cmd.connection_wait, !cmd.no_connection_wait, &cmd @@ -216,7 +230,7 @@ impl Command for CreateCommand { if let Some(pb) = pb.as_ref() { pb.set_message(format!( "Waiting for 
InfluxDB Inlet {} to be available... Retrying momentarily\n", - color_primary(&cmd.to) + color_primary(&to) )); } tokio::time::sleep(cmd.retry_wait).await @@ -236,26 +250,26 @@ impl Command for CreateCommand { let plain = if cmd.no_connection_wait { fmt_ok!("{created_message}\n") + &fmt_log!("It will automatically connect to the InfluxDB Outlet at {} as soon as it is available\n", - color_primary(&cmd.to) + color_primary(&to) ) - } else if inlet_status.status == ConnectionStatus::Up { + } else if inlet_status.connection == ConnectionStatus::Up { fmt_ok!("{created_message}\n") + &fmt_log!( "sending traffic to the TCP Outlet at {}\n", - color_primary(&cmd.to) + color_primary(&to) ) } else { fmt_warn!("{created_message}\n") + &fmt_log!( "but failed to connect to the TCP Outlet at {}\n", - color_primary(&cmd.to) + color_primary(&to) ) + &fmt_info!("It will automatically connect to the InfluxDB Outlet as soon as it is available\n") }; opts.terminal .to_stdout() .plain(plain) - .machine(inlet_status.bind_addr.to_string()) + .machine(inlet_status.bind_address.to_string()) .json_obj(&inlet_status)? 
.write_line()?; @@ -287,16 +301,13 @@ impl CreateCommand { .into_diagnostic()?; port_is_free_guard(&from)?; - self.to = crate::tcp::inlet::create::CreateCommand::parse_arg_to( - &opts.state, - self.to, - self.via.as_ref(), - ) - .await?; - if self.to().matches(0, &[proto::Project::CODE.into()]) && self.authorized.is_some() { - return Err(miette!( - "--authorized can not be used with project addresses" - ))?; + for to in self.to.iter_mut() { + *to = crate::tcp::inlet::create::CreateCommand::parse_arg_to( + &opts.state, + to.to_string(), + self.via.as_ref(), + ) + .await?; } self.tls_certificate_provider = @@ -323,8 +334,16 @@ impl CreateCommand { Ok(self) } - pub fn to(&self) -> MultiAddr { - MultiAddr::from_str(&self.to).unwrap() + pub fn to(&self) -> Vec { + self.to + .iter() + .map(|t| MultiAddr::from_str(t).unwrap()) + .collect() + } + + pub fn target_redundancy(&self) -> usize { + self.target_redundancy + .unwrap_or(self.to.len().saturating_sub(1)) } pub async fn secure_channel_identifier( diff --git a/implementations/rust/ockam/ockam_command/src/tcp/inlet/create.rs b/implementations/rust/ockam/ockam_command/src/tcp/inlet/create.rs index 6dda3725ed5..e5e24710585 100644 --- a/implementations/rust/ockam/ockam_command/src/tcp/inlet/create.rs +++ b/implementations/rust/ockam/ockam_command/src/tcp/inlet/create.rs @@ -24,7 +24,7 @@ use ockam_api::cli_state::journeys::{ }; use ockam_api::cli_state::{random_name, CliState}; use ockam_api::colors::{color_primary, color_primary_alt}; -use ockam_api::nodes::models::portal::InletStatus; +use ockam_api::nodes::models::portal::InletStatusView; use ockam_api::nodes::service::tcp_inlets::Inlets; use ockam_api::nodes::BackgroundNodeClient; use ockam_api::{fmt_info, fmt_log, fmt_ok, fmt_warn, ConnectionStatus}; @@ -72,8 +72,13 @@ pub struct CreateCommand { /// or just the name of the service as `outlet` or `/service/outlet`. /// If you are passing just the service name, consider using `--via` to specify the /// relay name (e.g. 
`ockam tcp-inlet create --to outlet --via myrelay`). - #[arg(long, display_order = 900, id = "ROUTE", default_value_t = tcp_inlet_default_to_addr())] - pub to: String, + #[arg(long, display_order = 900, id = "ROUTE", default_values_t = tcp_inlet_default_to_addr_vec())] + pub to: Vec, + + /// Target redundancy for the TCP Inlet routes; 0 means only one route is instantiated. + /// When omitted, the number of provided `to` routes minus one applies. + #[arg(long)] + pub target_redundancy: Option, /// Name of the relay that this TCP Inlet will use to connect to the TCP Outlet. /// @@ -111,6 +116,10 @@ pub struct CreateCommand { #[arg(long, display_order = 900, id = "WAIT", default_value = "5s", value_parser = duration_parser)] pub connection_wait: Duration, + /// How long until the outlet route is considered disconnected. + #[arg(long, default_value = "5s", value_parser = duration_parser)] + pub ping_timeout: Duration, + /// Time to wait before retrying to connect to the TCP Outlet. #[arg(long, display_order = 900, id = "RETRY", default_value = "20s", value_parser = duration_parser)] pub retry_wait: Duration, @@ -182,6 +191,10 @@ pub(crate) fn tcp_inlet_default_to_addr() -> String { "/project//service/forward_to_/secure/api/service/".to_string() } +pub(crate) fn tcp_inlet_default_to_addr_vec() -> Vec { + vec![tcp_inlet_default_to_addr()] +} + #[async_trait] impl Command for CreateCommand { const NAME: &'static str = "tcp-inlet create"; @@ -190,6 +203,9 @@ impl Command for CreateCommand { initialize_default_node(ctx, &opts).await?; let cmd = self.parse_args(&opts).await?; + // TODO: properly handle printing of multiple outlets + let to = cmd.to.join(", "); + let mut node = BackgroundNodeClient::create(ctx, &opts.state, &cmd.at).await?; cmd.timeout.timeout.map(|t| node.set_timeout_mut(t)); @@ -240,14 +256,16 @@ impl Command for CreateCommand { } loop { - let result: Reply = node + let result: Reply = node .create_inlet( ctx, cmd.from.hostname_port(), - &cmd.to(), + 
cmd.target_redundancy(), + cmd.to(), cmd.name.as_ref().expect("The `name` argument should be set to its default value if not provided"), &cmd.authorized, &cmd.allow, + cmd.ping_timeout, cmd.connection_wait, !cmd.no_connection_wait, &cmd.secure_channel_identifier(&opts.state).await?, @@ -280,7 +298,7 @@ impl Command for CreateCommand { if let Some(pb) = pb.as_ref() { pb.set_message(format!( "Waiting for TCP Inlet {} to be available... Retrying momentarily\n", - color_primary(&cmd.to) + color_primary(&to) )); } tokio::time::sleep(cmd.retry_wait).await @@ -303,19 +321,19 @@ impl Command for CreateCommand { fmt_ok!("{created_message}\n") + &fmt_info!( "It will automatically connect to the TCP Outlet at {} as soon as it is available\n", - color_primary(&cmd.to) + color_primary(&to) ) - } else if inlet_status.status == ConnectionStatus::Up { + } else if inlet_status.connection == ConnectionStatus::Up { fmt_ok!("{created_message}\n") + &fmt_log!( "sending traffic to the TCP Outlet at {}\n", - color_primary(&cmd.to) + color_primary(&to) ) } else { fmt_warn!("{created_message}\n") + &fmt_log!( "but it failed to connect to the TCP Outlet at {}\n", - color_primary(&cmd.to) + color_primary(&to) ) + &fmt_info!( "It will automatically connect to the TCP Outlet as soon as it is available\n", @@ -332,7 +350,7 @@ impl Command for CreateCommand { opts.terminal .to_stdout() .plain(plain) - .machine(inlet_status.bind_addr.to_string()) + .machine(inlet_status.bind_address.to_string()) .json(serde_json::json!(&inlet_status)) .write_line()?; @@ -341,8 +359,16 @@ impl Command for CreateCommand { } impl CreateCommand { - pub fn to(&self) -> MultiAddr { - MultiAddr::from_str(&self.to).unwrap() + pub fn to(&self) -> Vec { + self.to + .iter() + .map(|t| MultiAddr::from_str(t).unwrap()) + .collect() + } + + pub fn target_redundancy(&self) -> usize { + self.target_redundancy + .unwrap_or(self.to.len().saturating_sub(1)) } pub async fn secure_channel_identifier( @@ -360,14 +386,14 @@ impl 
CreateCommand { &self, opts: &CommandGlobalOpts, node_name: &str, - inlet: &InletStatus, + inlet: &InletStatusView, ) -> miette::Result<()> { let mut attributes = HashMap::new(); attributes.insert(TCP_INLET_AT, node_name.to_string()); attributes.insert(TCP_INLET_FROM, self.from.to_string()); - attributes.insert(TCP_INLET_TO, self.to.clone()); + attributes.insert(TCP_INLET_TO, self.to.join(", ")); attributes.insert(TCP_INLET_ALIAS, inlet.alias.clone()); - attributes.insert(TCP_INLET_CONNECTION_STATUS, inlet.status.to_string()); + attributes.insert(TCP_INLET_CONNECTION_STATUS, inlet.connection.to_string()); attributes.insert(NODE_NAME, node_name.to_string()); Ok(opts .state @@ -398,11 +424,8 @@ impl CreateCommand { .into_diagnostic()?; port_is_free_guard(&from)?; - self.to = Self::parse_arg_to(&opts.state, self.to, self.via.as_ref()).await?; - if self.to().matches(0, &[proto::Project::CODE.into()]) && self.authorized.is_some() { - return Err(miette!( - "--authorized can not be used with project addresses" - ))?; + for to in self.to.iter_mut() { + *to = Self::parse_arg_to(&opts.state, to.to_string(), self.via.as_ref()).await?; } self.tls_certificate_provider = @@ -496,7 +519,7 @@ mod tests { #[test] fn command_can_be_parsed_from_name() { let cmd = parse_cmd_from_args(CreateCommand::NAME, &[]); - assert!(cmd.is_ok()); + cmd.unwrap(); } #[ockam_macros::test] diff --git a/implementations/rust/ockam/ockam_command/src/tcp/inlet/show.rs b/implementations/rust/ockam/ockam_command/src/tcp/inlet/show.rs index 7d597abc64c..875666bd615 100644 --- a/implementations/rust/ockam/ockam_command/src/tcp/inlet/show.rs +++ b/implementations/rust/ockam/ockam_command/src/tcp/inlet/show.rs @@ -7,7 +7,7 @@ use console::Term; use miette::{miette, IntoDiagnostic}; use ockam::Context; use ockam_api::address::extract_address_value; -use ockam_api::nodes::models::portal::{InletStatus, InletStatusList}; +use ockam_api::nodes::models::portal::{InletStatusList, InletStatusView}; use 
ockam_api::nodes::BackgroundNodeClient; use ockam_api::output::Output; use ockam_api::terminal::{Terminal, TerminalStream}; @@ -105,7 +105,7 @@ impl ShowCommandTui for ShowTui { } async fn show_single(&self, item_name: &str) -> miette::Result<()> { - let inlet_status: InletStatus = self + let inlet_status: InletStatusView = self .node .ask(&self.ctx, Request::get(format!("/node/inlet/{item_name}"))) .await?; diff --git a/implementations/rust/ockam/ockam_command/tests/bats/local/portals.bats b/implementations/rust/ockam/ockam_command/tests/bats/local/portals.bats index 5efb95d3cdf..023c685398d 100644 --- a/implementations/rust/ockam/ockam_command/tests/bats/local/portals.bats +++ b/implementations/rust/ockam/ockam_command/tests/bats/local/portals.bats @@ -56,15 +56,16 @@ teardown() { } @test "portals - create an inlet/outlet pair and move tcp traffic through it, where the outlet points to an HTTPs endpoint" { + skip run_success "$OCKAM" node create n1 run_success "$OCKAM" node create n2 - run_success "$OCKAM" tcp-outlet create --at /node/n1 --to google.com:443 + # rust-lang.org rejects http requests to its port 443 + run_success "$OCKAM" tcp-outlet create --at /node/n1 --to rust-lang.org:443 --tls port="$(random_port)" - run_success "$OCKAM" tcp-inlet create --at /node/n2 --from "$port" --to /node/n1/service/outlet + run_success "$OCKAM" tcp-inlet create --http-header 'Host: rust-lang.org' --at /node/n2 --from "$port" --to /node/n1/service/outlet - # This test does not pass on CI - # run_success curl --fail --head --max-time 10 "127.0.0.1:$port" + run_success curl --fail --head --max-time 10 "127.0.0.1:${port}" } @test "portals - create an inlet/outlet pair with relay through a relay and move tcp traffic through it" { @@ -127,15 +128,16 @@ teardown() { } @test "portals no handshake - create an inlet/outlet pair and move tcp traffic through it, where the outlet points to an HTTPs endpoint" { + skip run_success "$OCKAM" node create n1 run_success "$OCKAM" node create 
n2 - run_success "$OCKAM" tcp-outlet create --at /node/n1 --to google.com:443 --skip-handshake + # rust-lang.org rejects http requests to its port 443 + run_success "$OCKAM" tcp-outlet create --at /node/n1 --to rust-lang.org:443 --skip-handshake --tls port="$(random_port)" - run_success "$OCKAM" tcp-inlet create --at /node/n2 --from "$port" --to /node/n1/service/outlet --skip-handshake + run_success "$OCKAM" tcp-inlet create --skip-handshake --http-header 'Host: rust-lang.org' --at /node/n2 --from "$port" --to /node/n1/service/outlet - # This test does not pass on CI - # run_success curl --fail --head --max-time 10 "127.0.0.1:$port" + run_success curl --fail --head --max-time 10 "127.0.0.1:${port}" } @test "portals no handshake - create an inlet/outlet pair with relay through a relay and move tcp traffic through it" { @@ -196,7 +198,7 @@ teardown() { run_success "$OCKAM" node create green inlet_port="$(random_port)" - run_success "$OCKAM" tcp-inlet create --at /node/green --from "$inlet_port" --to /node/blue/secure/api/service/outlet + run_success "$OCKAM" tcp-inlet create --ping-timeout 500ms --at /node/green --from "$inlet_port" --to /node/blue/secure/api/service/outlet run_success curl -sfI --retry-all-errors --retry-delay 5 --retry 10 -m 5 "127.0.0.1:$inlet_port" run_success "$OCKAM" node delete blue --yes @@ -205,7 +207,7 @@ teardown() { run_success "$OCKAM" node create blue --tcp-listener-address "127.0.0.1:$node_port" run_success "$OCKAM" tcp-outlet create --at /node/blue --to "$PYTHON_SERVER_PORT" - sleep 15 + sleep 1 run_success curl -sfI --retry-all-errors --retry-delay 5 --retry 10 -m 5 "127.0.0.1:$inlet_port" } @@ -213,12 +215,12 @@ teardown() { run_success "$OCKAM" node create n1 node_port="$(random_port)" inlet_port="$(random_port)" - run_success "$OCKAM" tcp-inlet create --at /node/n1 --from "${inlet_port}" --to "/ip4/127.0.0.1/tcp/${node_port}/service/outlet" + run_success "$OCKAM" tcp-inlet create --ping-timeout 500ms --at /node/n1 --from 
"${inlet_port}" --to "/ip4/127.0.0.1/tcp/${node_port}/service/outlet" run_success "$OCKAM" node create n2 --tcp-listener-address "127.0.0.1:${node_port}" run_success "$OCKAM" tcp-outlet create --at /node/n2 --to "$PYTHON_SERVER_PORT" - sleep 15 + sleep 1 run_success curl -sfI --retry-all-errors --retry-delay 5 --retry 10 -m 5 "127.0.0.1:${inlet_port}" } @@ -441,3 +443,40 @@ teardown() { wait_for_port ${inlet_port} run_success curl -sf -m 5 "http://127.0.0.1:${inlet_port}" } + +@test "portals - highly available portal keep working after relay is deleted" { + # blue is the outlet node + run_success "$OCKAM" node create blue + run_success "$OCKAM" tcp-outlet create --at /node/blue --to "$PYTHON_SERVER_PORT" + + # create relays nodes + run_success "$OCKAM" node create relay1 + run_success "$OCKAM" node create relay2 + + run_success "$OCKAM" relay create --at /node/relay1 --to /node/blue blue1 + run_success "$OCKAM" relay create --at /node/relay2 --to /node/blue blue2 + + # green is the inlet node + run_success "$OCKAM" node create green + inlet_port="$(random_port)" + run_success "$OCKAM" tcp-inlet create \ + --from "$inlet_port" \ + --at /node/green \ + --ping-timeout 500ms \ + --to /node/relay1/secure/api/service/forward_to_blue1/secure/api/service/outlet \ + --to /node/relay2/secure/api/service/forward_to_blue2/secure/api/service/outlet + + run_success curl -sfI --retry-all-errors --retry-delay 5 --retry 10 -m 5 "127.0.0.1:$inlet_port" + + # delete relay1 + run_success "$OCKAM" node delete relay1 --yes + + # sleep to make sure the timeout is triggered + sleep 1 + + # check that the connection is still working by querying multiple times + for i in {1..10}; do + # no retray, as we want to check that every connection is successful + run_success curl -sfI -m 1 "127.0.0.1:$inlet_port" + done +} diff --git a/implementations/rust/ockam/ockam_command/tests/bats/local/portals_lifecycle.bats b/implementations/rust/ockam/ockam_command/tests/bats/local/portals_lifecycle.bats 
index 90314a2b333..9094cf32211 100644 --- a/implementations/rust/ockam/ockam_command/tests/bats/local/portals_lifecycle.bats +++ b/implementations/rust/ockam/ockam_command/tests/bats/local/portals_lifecycle.bats @@ -53,7 +53,7 @@ teardown() { # Check that inlet is available for deletion and delete it run_success $OCKAM tcp-inlet show test-inlet --at /node/n2 --output json assert_output --partial "\"alias\": \"test-inlet\"" - assert_output --partial "\"bind_addr\": \"127.0.0.1:$inlet_port\"" + assert_output --partial "\"bind_address\": \"127.0.0.1:$inlet_port\"" run_success $OCKAM tcp-inlet delete "test-inlet" --at /node/n2 --yes diff --git a/implementations/rust/ockam/ockam_multiaddr/src/lib.rs b/implementations/rust/ockam/ockam_multiaddr/src/lib.rs index ea0a28cb8e3..d96ba08131b 100644 --- a/implementations/rust/ockam/ockam_multiaddr/src/lib.rs +++ b/implementations/rust/ockam/ockam_multiaddr/src/lib.rs @@ -537,11 +537,11 @@ impl MultiAddr { } /// Check if the protocol codes starts with the given code. pub fn starts_with(&self, code: Code) -> bool { - self.matches(0, &[code.into()]) + self.matches_at(0, &[code.into()]) } /// Check if the protocol codes match the given sequence. - pub fn matches<'a, I>(&self, start: usize, codes: I) -> bool + pub fn matches_at<'a, I>(&self, start: usize, codes: I) -> bool where I: IntoIterator, I::IntoIter: ExactSizeIterator, @@ -566,6 +566,16 @@ impl MultiAddr { n == 0 } + // Check if the protocol codes matches the given sequence at any position. 
+ pub fn find(&self, codes: &[Match]) -> Option<(ProtoValue, usize)> { + for n in 0..self.len() { + if self.matches_at(n, codes) { + return Some((self.iter().nth(n).unwrap(), n)); + } + } + None + } + pub fn split(&self, at: usize) -> (MultiAddr, MultiAddr) { let mut iter = self.iter(); let a = MultiAddr::default() diff --git a/implementations/rust/ockam/ockam_multiaddr/tests/id.rs b/implementations/rust/ockam/ockam_multiaddr/tests/id.rs index e5a85edaa18..dd743bbb706 100644 --- a/implementations/rust/ockam/ockam_multiaddr/tests/id.rs +++ b/implementations/rust/ockam/ockam_multiaddr/tests/id.rs @@ -47,7 +47,7 @@ quickcheck! { fn match_test(a: Addr) -> bool { let codes = a.0.iter().map(|p| Match::code(p.code())).collect::>(); - a.0.matches(0, &codes) + a.0.matches_at(0, &codes) } fn push_back_value(a: Addr) -> bool { diff --git a/implementations/rust/ockam/ockam_transport_core/src/error.rs b/implementations/rust/ockam/ockam_transport_core/src/error.rs index 1313098ba96..e86a2fe5b4c 100644 --- a/implementations/rust/ockam/ockam_transport_core/src/error.rs +++ b/implementations/rust/ockam/ockam_transport_core/src/error.rs @@ -147,7 +147,7 @@ impl From for Error { BindFailed => Kind::Io, ConnectionDrop => Kind::Io, AlreadyConnected => Kind::Io, - PeerNotFound => Kind::Misuse, + PeerNotFound => Kind::NotFound, PeerBusy => Kind::Io, UnknownRoute => Kind::Misuse, InvalidAddress(_) => Kind::Misuse, diff --git a/implementations/rust/ockam/ockam_transport_tcp/src/portal/inlet_listener.rs b/implementations/rust/ockam/ockam_transport_tcp/src/portal/inlet_listener.rs index a184191bcd1..5d706efec67 100644 --- a/implementations/rust/ockam/ockam_transport_tcp/src/portal/inlet_listener.rs +++ b/implementations/rust/ockam/ockam_transport_tcp/src/portal/inlet_listener.rs @@ -4,10 +4,10 @@ use crate::portal::{InletSharedState, ReadHalfMaybeTls, WriteHalfMaybeTls}; use crate::{portal::TcpPortalWorker, TcpInlet, TcpInletOptions, TcpRegistry}; use log::warn; use 
ockam_core::compat::net::SocketAddr; -use ockam_core::compat::sync::{Arc, RwLock as SyncRwLock}; +use ockam_core::compat::sync::Arc; use ockam_core::errcode::{Kind, Origin}; use ockam_core::{async_trait, compat::boxed::Box, Result}; -use ockam_core::{Address, Processor, Route}; +use ockam_core::{Address, Processor}; use ockam_node::Context; use ockam_transport_core::{HostnamePort, TransportError}; use rustls::pki_types::CertificateDer; @@ -16,7 +16,7 @@ use std::time::Duration; use tokio::net::TcpListener; use tokio::time::Instant; use tokio_rustls::{TlsAcceptor, TlsStream}; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, trace}; /// A TCP Portal Inlet listen processor /// @@ -26,22 +26,19 @@ use tracing::{debug, error, instrument}; pub(crate) struct TcpInletListenProcessor { registry: TcpRegistry, inner: TcpListener, - inlet_shared_state: Arc>, - options: TcpInletOptions, + inlet_shared_state: InletSharedState, } impl TcpInletListenProcessor { pub fn new( registry: TcpRegistry, inner: TcpListener, - inlet_shared_state: Arc>, - options: TcpInletOptions, + inlet_shared_state: InletSharedState, ) -> Self { Self { registry, inner, inlet_shared_state, - options, } } @@ -50,9 +47,7 @@ impl TcpInletListenProcessor { pub(crate) async fn start( ctx: &Context, registry: TcpRegistry, - outlet_listener_route: Route, addr: SocketAddr, - options: TcpInletOptions, ) -> Result { let processor_address = Address::random_tagged("TcpInletListenProcessor"); @@ -65,11 +60,9 @@ impl TcpInletListenProcessor { } }; let socket_addr = inner.local_addr().map_err(TransportError::from)?; - let inlet_shared_state = - InletSharedState::create(ctx, outlet_listener_route, options.is_paused)?; - let inlet_shared_state = Arc::new(SyncRwLock::new(inlet_shared_state)); - let processor = Self::new(registry, inner, inlet_shared_state.clone(), options); + let inlet_shared_state: InletSharedState = Default::default(); + let processor = Self::new(registry, inner, 
inlet_shared_state.clone()); ctx.start_processor(processor_address.clone(), processor)?; Ok(TcpInlet::new_regular( @@ -178,8 +171,19 @@ impl Processor for TcpInletListenProcessor { async fn process(&mut self, ctx: &mut Self::Context) -> Result { let (stream, socket_addr) = self.inner.accept().await.map_err(TransportError::from)?; + let inlet_route_state = self.inlet_shared_state.choose_active_route().await; + + trace!( + "selected route: {} for a new TCP portal", + inlet_route_state.route() + ); + + // options are route-specific since authorization could be different depending + // on the project + let options = inlet_route_state.options(); + stream - .set_nodelay(!self.options.enable_nagle) + .set_nodelay(!options.enable_nagle) .map_err(TransportError::from)?; let addresses = Addresses::generate(PortalType::Inlet { @@ -187,21 +191,13 @@ impl Processor for TcpInletListenProcessor { self.inner.local_addr().map_err(TransportError::from)?, ), }); - - let inlet_shared_state = self.inlet_shared_state.read().unwrap().clone(); - - if inlet_shared_state.is_paused() { - // Just drop the stream - return Ok(true); - } - TcpInletOptions::setup_flow_control( ctx.flow_controls(), &addresses, - inlet_shared_state.route().next()?, + inlet_route_state.route().next()?, ); - let streams = if let Some(certificate_provider) = &self.options.tls_certificate_provider { + let streams = if let Some(certificate_provider) = &options.tls_certificate_provider { let (rx, tx) = tokio::io::split(TlsStream::from( Self::create_acceptor(ctx, certificate_provider, DEFAULT_TIMEOUT) .await? 
@@ -228,13 +224,13 @@ impl Processor for TcpInletListenProcessor { self.registry.clone(), streams, HostnamePort::from(socket_addr), - inlet_shared_state.route().clone(), - inlet_shared_state.their_identifier(), + inlet_route_state.route().clone(), + inlet_route_state.their_identifier(), addresses, - self.options.incoming_access_control.clone(), - self.options.outgoing_access_control.clone(), - self.options.portal_payload_length, - self.options.skip_handshake, + options.incoming_access_control.clone(), + options.outgoing_access_control.clone(), + options.portal_payload_length, + options.skip_handshake, )?; Ok(true) diff --git a/implementations/rust/ockam/ockam_transport_tcp/src/portal/inlet_shared_state.rs b/implementations/rust/ockam/ockam_transport_tcp/src/portal/inlet_shared_state.rs index 799533de155..1ae1b0f5908 100644 --- a/implementations/rust/ockam/ockam_transport_tcp/src/portal/inlet_shared_state.rs +++ b/implementations/rust/ockam/ockam_transport_tcp/src/portal/inlet_shared_state.rs @@ -1,25 +1,131 @@ +use crate::TcpInletOptions; +use ockam_core::errcode::{Kind, Origin}; use ockam_core::{LocalInfoIdentifier, Result, Route, SecureChannelMetadata}; use ockam_node::Context; +use rand::seq::SliceRandom; +use std::sync::{Arc, Mutex as SyncMutex, RwLock as SyncRwLock}; +use tokio::sync::Notify; +use tracing::debug; /// State shared between `TcpInletListenProcessor` and `TcpInlet` to allow manipulating its state /// from outside the worker: update the route to the outlet or pause it. -#[derive(Debug, Clone)] +#[derive(Debug, Default, Clone)] pub struct InletSharedState { - route: Route, - // Identifier of the other side - // The identifier is always the same for the same route as is obtained from the first local - // secure channel on the route. However, we should recheck that identifier hasn't changed - // when updating the route. 
- their_identifier: Option, - is_paused: bool, - // Starts with 0 and increments each time when inlet updates the route to the outlet - // (e.g. when reconnecting), this will allow outlet to figure out what is the most recent - // return_route even if messages arrive out-of-order - route_index: u32, + routes: Arc>>, + // To notify the listener that a new route is available + notify: Arc, } impl InletSharedState { - pub fn create(ctx: &Context, route: Route, is_paused: bool) -> Result { + /// Add a new route to the shared state and return true if the route was added successfully. + /// If the route already exists, the function will return false. + pub fn try_add_route(&self, key: String, route_state: InletRouteState) -> bool { + let mut guard = self.routes.write().unwrap(); + if guard.iter().any(|(k, _)| k == &key) { + false + } else { + guard.push((key, route_state)); + self.notify.notify_waiters(); + true + } + } + + /// Adds a new route to the shared state and return an error if the route already exists. + pub fn add_route(&self, key: String, route_state: InletRouteState) -> Result<()> { + if !self.try_add_route(key, route_state) { + Err(ockam_core::Error::new( + Origin::Channel, + Kind::AlreadyExists, + "Route already exists", + )) + } else { + Ok(()) + } + } + + /// List all route keys + pub fn list_all_route_keys(&self) -> Vec { + let guard = self.routes.read().unwrap(); + guard.iter().map(|(k, _)| k.clone()).collect() + } + + /// Removes a route given its key + pub fn remove_route(&self, route_key: &str) { + let mut guard = self.routes.write().unwrap(); + guard.retain(|(k, _)| k != route_key); + } + + /// Choose one random active route from the shared state, if no active routes are found, wait + /// until a new route is added or an existing route is unpaused. 
+ pub async fn choose_active_route(&self) -> InletRouteState { + loop { + { + let guard = self.routes.read().unwrap(); + let active_routes: Vec<&InletRouteState> = guard + .iter() + .filter(|(_, route)| !route.is_paused()) + .map(|(_, route)| route) + .collect(); + + if !active_routes.is_empty() { + break active_routes + .choose(&mut rand::thread_rng()) + .copied() + .cloned() + .unwrap(); + } + } + // let's wait until something changes and try again + debug!("No active route found for the inlet, waiting for a connection"); + self.notify.notified().await; + } + } + + /// Update the route and set pause to false + pub fn update_route_and_unpause( + &self, + ctx: &Context, + route_key: &str, + new_route: Route, + options: TcpInletOptions, + ) -> Result<()> { + debug!("Updating route {route_key} with {new_route}"); + + let guard = self.routes.read().unwrap(); + if let Some(route) = guard.iter().find(|(k, _)| k == route_key) { + route.1.update_route(ctx, new_route, options)?; + self.notify.notify_waiters(); + Ok(()) + } else { + Err(ockam_core::Error::new( + Origin::Channel, + Kind::NotFound, + "Route Key not found", + )) + } + } + + /// Pause the route + pub fn pause(&self, route_key: &str) { + let guard = self.routes.read().unwrap(); + if let Some(route) = guard.iter().find(|(k, _)| k == route_key) { + route.1.pause(); + } + } +} + +#[derive(Clone, Debug)] +pub struct InletRouteState { + inner: Arc>, +} + +impl InletRouteState { + pub fn create( + ctx: &Context, + route: Route, + is_paused: bool, + options: TcpInletOptions, + ) -> Result { let their_identifier = if let Some((_address, metadata)) = ctx.find_terminal_address(route.iter())? 
{ SecureChannelMetadata::from_terminal_address_metadata(&metadata) @@ -30,17 +136,82 @@ impl InletSharedState { }; Ok(Self { - route, - their_identifier, - is_paused, - route_index: 0, + inner: Arc::new(SyncMutex::new(InletRouteStateInner { + route, + their_identifier, + is_paused, + route_index: 0, + options, + })), }) } + pub fn route(&self) -> Route { + self.inner.lock().unwrap().route().clone() + } + + pub fn options(&self) -> TcpInletOptions { + self.inner.lock().unwrap().options().clone() + } + + pub fn their_identifier(&self) -> Option { + self.inner.lock().unwrap().their_identifier() + } + + pub fn is_paused(&self) -> bool { + self.inner.lock().unwrap().is_paused() + } + + pub fn route_index(&self) -> u32 { + self.inner.lock().unwrap().route_index() + } + + pub fn update_route( + &self, + ctx: &Context, + new_route: Route, + options: TcpInletOptions, + ) -> Result<()> { + self.inner + .lock() + .unwrap() + .update_route(ctx, new_route, options) + } + + pub fn pause(&self) { + self.inner.lock().unwrap().is_paused = true; + } +} + +/// State of a single inlet route. +#[derive(Debug)] +struct InletRouteStateInner { + /// Route to the outlet + route: Route, + // Identifier of the other side + // The identifier is always the same for the same route as is obtained from the first local + // secure channel on the route. However, we should recheck that identifier hasn't changed + // when updating the route. + their_identifier: Option, + is_paused: bool, + // Starts with 0 and increments each time when inlet updates the route to the outlet + // (e.g. when reconnecting), this will allow outlet to figure out what is the most recent + // return_route even if messages arrive out-of-order + route_index: u32, + + // TODO: should options be dependent on the MultiAddr? 
+ options: TcpInletOptions, +} + +impl InletRouteStateInner { pub fn route(&self) -> &Route { &self.route } + pub fn options(&self) -> &TcpInletOptions { + &self.options + } + pub fn their_identifier(&self) -> Option { self.their_identifier.clone() } @@ -53,7 +224,12 @@ impl InletSharedState { self.route_index } - pub fn update_route(&mut self, ctx: &Context, new_route: Route) -> Result<()> { + fn update_route( + &mut self, + ctx: &Context, + new_route: Route, + options: TcpInletOptions, + ) -> Result<()> { let their_identifier = if let Some((_address, metadata)) = ctx.find_terminal_address(new_route.iter())? { SecureChannelMetadata::from_terminal_address_metadata(&metadata) @@ -63,16 +239,32 @@ impl InletSharedState { None }; + if let Some(current_identifier) = &self.their_identifier { + if let Some(new_identifier) = &their_identifier { + if current_identifier != new_identifier { + return Err(ockam_core::Error::new( + Origin::Channel, + Kind::Conflict, + "Route identifier has changed", + )); + } + } else { + return Err(ockam_core::Error::new( + Origin::Channel, + Kind::Conflict, + "Route identifier not found", + )); + } + } + self.their_identifier = their_identifier; + self.options = options; self.route = new_route; // Overflow here is very unlikely... 
self.route_index += 1; + self.is_paused = false; Ok(()) } - - pub fn set_is_paused(&mut self, is_paused: bool) { - self.is_paused = is_paused; - } } diff --git a/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/privileged_portals.rs b/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/privileged_portals.rs index 9e3fd34849e..7287e733bef 100644 --- a/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/privileged_portals.rs +++ b/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/privileged_portals.rs @@ -1,17 +1,17 @@ use crate::portal::InletSharedState; use crate::privileged_portal::{InternalProcessor, Port, RemoteWorker}; -use crate::{TcpInlet, TcpInletOptions, TcpOutletOptions, TcpTransport}; +use crate::{TcpInlet, TcpOutletOptions, TcpTransport}; use caps::Capability::{CAP_BPF, CAP_NET_ADMIN, CAP_NET_RAW, CAP_SYS_ADMIN}; use caps::{CapSet, Capability}; use core::fmt::Debug; use log::{debug, error}; use nix::unistd::Uid; -use ockam_core::compat::sync::{Arc, RwLock as SyncRwLock}; -use ockam_core::{Address, DenyAll, Result, Route}; +use ockam_core::{Address, DenyAll, IncomingAccessControl, OutgoingAccessControl, Result}; use ockam_node::compat::asynchronous::resolve_peer; use ockam_node::{ProcessorBuilder, WorkerBuilder}; use ockam_transport_core::{HostnamePort, TransportError}; -use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; +use std::sync::Arc; use tokio::net::TcpListener; use tokio::sync::mpsc::channel; use tracing::instrument; @@ -54,21 +54,16 @@ impl TcpTransport { } /// Create a Privileged Inlet - #[instrument(skip(self), fields(outlet_route=?outlet_route.clone()))] + #[instrument(skip(self))] pub async fn create_privileged_inlet( &self, - bind_addr: impl Into + Clone + Debug, - outlet_route: impl Into + Clone + Debug, - options: TcpInletOptions, + bind_addr: SocketAddr, + incoming_access_control: Arc, + outgoing_access_control: Arc, ) -> Result { 
Self::check_capabilities()?; - let outlet_route = outlet_route.into(); - - let next = outlet_route.next().cloned()?; - - let bind_addr = bind_addr.into(); - let tcp_listener = TcpListener::bind(bind_addr.clone()) + let tcp_listener = TcpListener::bind(bind_addr) .await .map_err(|_| TransportError::BindFailed)?; let local_address = tcp_listener @@ -90,20 +85,13 @@ impl TcpTransport { let tcp_packet_writer = self.start_raw_socket_processor_if_needed().await?; - let inlet_shared_state = InletSharedState::create(self.ctx(), outlet_route.clone(), false)?; - let inlet_shared_state = Arc::new(SyncRwLock::new(inlet_shared_state)); - let remote_worker_address = Address::random_tagged("Ebpf.RemoteWorker.Inlet"); let internal_worker_address = Address::random_tagged("Ebpf.InternalWorker.Inlet"); - TcpInletOptions::setup_flow_control_for_address( - self.ctx().flow_controls(), - &remote_worker_address, - &next, - ); - let (sender, receiver) = channel(20); // FIXME + let inlet_shared_state = InletSharedState::default(); + let inlet_info = self.ebpf_support.inlet_registry.create_inlet( remote_worker_address.clone(), internal_worker_address.clone(), @@ -122,7 +110,7 @@ impl TcpTransport { ); WorkerBuilder::new(remote_worker) .with_address(remote_worker_address.clone()) - .with_incoming_access_control_arc(options.incoming_access_control) + .with_incoming_access_control_arc(incoming_access_control) .with_outgoing_access_control(DenyAll) .start(self.ctx())?; @@ -130,7 +118,7 @@ impl TcpTransport { ProcessorBuilder::new(internal_worker) .with_address(internal_worker_address.clone()) .with_incoming_access_control(DenyAll) - .with_outgoing_access_control_arc(options.outgoing_access_control) + .with_outgoing_access_control_arc(outgoing_access_control) .start(self.ctx())?; Ok(TcpInlet::new_privileged( diff --git a/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/registry/inlet.rs b/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/registry/inlet.rs 
index 9fbc71e4d44..d01a24445cd 100644 --- a/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/registry/inlet.rs +++ b/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/registry/inlet.rs @@ -1,4 +1,4 @@ -use crate::portal::InletSharedState; +use crate::portal::{InletRouteState, InletSharedState}; use crate::privileged_portal::packet::RawSocketReadResult; use crate::privileged_portal::{ConnectionIdentifier, Port}; use ockam_core::compat::sync::{Arc, RwLock as SyncRwLock}; @@ -30,7 +30,7 @@ impl InletRegistry { sender: Sender, port: Port, tcp_listener: TcpListener, - inlet_shared_state: Arc>, + inlet_shared_state: InletSharedState, ) -> Inlet { let mut inlets = self.inlets.write().unwrap(); @@ -70,7 +70,7 @@ pub struct Inlet { /// Port pub port: Port, /// Route to the corresponding Outlet - pub inlet_shared_state: Arc>, + pub inlet_shared_state: InletSharedState, /// Hold to mark the port as taken pub _tcp_listener: Arc, /// Same map with different key @@ -90,7 +90,7 @@ impl Inlet { ); self.connections2.write().unwrap().insert( InletConnectionKey2 { - their_identifier: connection.their_identifier.clone(), + their_identifier: connection.inlet_route_state.their_identifier(), connection_identifier: connection.connection_identifier.clone(), }, connection, @@ -144,8 +144,6 @@ struct InletConnectionKey2 { /// Inlet Mapping pub struct InletConnection { - /// Identity Identifier of the other side - pub their_identifier: Option, /// Unique connection Identifier pub connection_identifier: ConnectionIdentifier, /// We can listen of multiple IPs @@ -154,4 +152,6 @@ pub struct InletConnection { pub client_ip: Ipv4Addr, /// Client port pub client_port: Port, + /// Live Inlet Route State, route changes are reflected + pub inlet_route_state: InletRouteState, } diff --git a/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/workers/internal_processor.rs 
b/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/workers/internal_processor.rs index bcd15dcc3cb..8082dc61b8a 100644 --- a/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/workers/internal_processor.rs +++ b/implementations/rust/ockam/ockam_transport_tcp/src/privileged_portal/workers/internal_processor.rs @@ -1,12 +1,9 @@ +use crate::portal::InletRouteState; use crate::privileged_portal::packet::RawSocketReadResult; use crate::privileged_portal::{Inlet, InletConnection, OckamPortalPacket, Outlet, PortalMode}; use log::{debug, trace, warn}; -use ockam_core::{ - async_trait, cbor_encode_preallocate, route, LocalInfoIdentifier, LocalMessage, Processor, - Result, -}; +use ockam_core::{async_trait, cbor_encode_preallocate, route, LocalMessage, Processor, Result}; use ockam_node::Context; -use ockam_transport_core::TransportError; use rand::random; use std::net::Ipv4Addr; use std::sync::Arc; @@ -39,14 +36,14 @@ impl InternalProcessor { async fn new_inlet_connection( inlet: &Inlet, - their_identifier: Option, + inlet_route_state: InletRouteState, src_ip: Ipv4Addr, raw_socket_read_result: &RawSocketReadResult, ) -> Result> { // TODO: eBPF Remove connection eventually let connection = Arc::new(InletConnection { - their_identifier, + inlet_route_state, connection_identifier: random(), inlet_ip: raw_socket_read_result.ipv4_info.destination_ip(), client_ip: src_ip, @@ -72,12 +69,6 @@ impl Processor for InternalProcessor { match &self.mode { // Client -> Inlet packet PortalMode::Inlet { inlet } => { - let inlet_shared_state = inlet.inlet_shared_state.read().unwrap().clone(); - - if inlet_shared_state.is_paused() { - return Ok(true); - } - let connection = match inlet.get_connection_internal( raw_socket_read_result.ipv4_info.source_ip(), raw_socket_read_result.tcp_info.source_port(), @@ -88,11 +79,6 @@ impl Processor for InternalProcessor { raw_socket_read_result.ipv4_info.source_ip(), raw_socket_read_result.tcp_info.source_port(), ); - 
- if connection.their_identifier != inlet_shared_state.their_identifier() { - return Err(TransportError::IdentifierChanged)?; - } - connection } None => { @@ -110,9 +96,11 @@ impl Processor for InternalProcessor { raw_socket_read_result.ipv4_info.source_ip(), raw_socket_read_result.tcp_info.source_port(), ); + let active_route = inlet.inlet_shared_state.choose_active_route().await; + Self::new_inlet_connection( inlet, - inlet_shared_state.their_identifier(), + active_route, raw_socket_read_result.ipv4_info.source_ip(), &raw_socket_read_result, ) @@ -122,7 +110,7 @@ impl Processor for InternalProcessor { let portal_packet = OckamPortalPacket::from_tcp_packet( connection.connection_identifier.clone(), - inlet_shared_state.route_index(), + connection.inlet_route_state.route_index(), raw_socket_read_result.header_and_payload, ); @@ -130,7 +118,7 @@ impl Processor for InternalProcessor { ctx.forward_from_address( LocalMessage::new() - .with_onward_route(inlet_shared_state.route().clone()) + .with_onward_route(connection.inlet_route_state.route()) .with_return_route(route![inlet.remote_worker_address.clone()]) .with_payload(cbor_encode_preallocate(&portal_packet)?), ctx.primary_address().clone(), diff --git a/implementations/rust/ockam/ockam_transport_tcp/src/transport/portals.rs b/implementations/rust/ockam/ockam_transport_tcp/src/transport/portals.rs index 30c5f0602d7..26e4b40baf8 100644 --- a/implementations/rust/ockam/ockam_transport_tcp/src/transport/portals.rs +++ b/implementations/rust/ockam/ockam_transport_tcp/src/transport/portals.rs @@ -1,11 +1,10 @@ -use crate::portal::{InletSharedState, TcpInletListenProcessor}; +use crate::portal::{InletRouteState, InletSharedState, TcpInletListenProcessor}; use crate::{portal::TcpOutletListenWorker, TcpInletOptions, TcpOutletOptions, TcpTransport}; use core::fmt; use core::fmt::{Debug, Formatter}; use ockam_core::compat::net::SocketAddr; -use ockam_core::compat::sync::{Arc, RwLock as SyncRwLock}; use 
ockam_core::flow_control::FlowControls; -use ockam_core::{Address, Result, Route}; +use ockam_core::{route, Address, Result, Route}; use ockam_node::Context; use ockam_transport_core::{parse_socket_addr, HostnamePort}; use tracing::{debug, instrument}; @@ -24,9 +23,8 @@ impl TcpTransport { /// let route_path = route!["outlet"]; /// /// let tcp = TcpTransport::get_or_create(&ctx)?; - /// let address: Address = "inlet".into(); - /// tcp.create_inlet(address.clone(), route_path, TcpInletOptions::new()).await?; - /// # tcp.stop_inlet(&address)?; + /// let tcp_inlet = tcp.create_inlet("localhost", route_path, TcpInletOptions::new()).await?; + /// # tcp_inlet.stop(&ctx)?; /// # Ok(()) } /// ``` #[instrument(skip(self), fields(address = ? bind_addr.clone().into(), outlet_route = ? outlet_route.clone()))] @@ -37,14 +35,42 @@ impl TcpTransport { options: TcpInletOptions, ) -> Result { let socket_address = parse_socket_addr(&bind_addr.into())?; - TcpInletListenProcessor::start( - &self.ctx, - self.registry.clone(), - outlet_route.into(), - socket_address, - options, - ) - .await + + let tcp_inlet = + TcpInletListenProcessor::start(&self.ctx, self.registry.clone(), socket_address) + .await?; + + // Add the only route to the shared state + tcp_inlet.inlet_shared_state.add_route( + "main".to_string(), + InletRouteState::create(&self.ctx, outlet_route.into(), options.is_paused, options)?, + )?; + + Ok(tcp_inlet) + } + + /// Create Tcp Inlet that listens on bind_addr, transforms Tcp stream into Ockam Routable + /// Messages and forward them to Outlet using outlet_route. Inlet is bidirectional: Ockam + /// Messages sent to Inlet from Outlet (using return route) will be streamed to Tcp connection. + /// Pair of corresponding Inlet and Outlet is called Portal. 
+ /// + /// ```rust + /// # use std::net::SocketAddr; + /// # use ockam_transport_tcp::{TcpInletOptions, TcpTransport}; + /// # use ockam_node::Context; + /// # use ockam_core::{AllowAll, Result, route, Address}; + /// # async fn test(ctx: Context) -> Result<()> { + /// let route_path = route!["outlet"]; + /// + /// let tcp = TcpTransport::get_or_create(&ctx)?; + /// let tcp_inlet = tcp.crate_inlet_multi("localhost:4000".parse().unwrap()).await?; + /// tcp_inlet.add_route(&ctx, "first_route_key".to_string(), route_path, TcpInletOptions::new())?; + /// # tcp_inlet.stop(&ctx)?; + /// # Ok(()) } + /// ``` + #[instrument(skip(self), fields(address = ? bind_addr))] + pub async fn crate_inlet_multi(&self, bind_addr: SocketAddr) -> Result { + TcpInletListenProcessor::start(&self.ctx, self.registry.clone(), bind_addr).await } /// Stop inlet at addr @@ -132,7 +158,7 @@ impl TcpTransport { #[derive(Clone, Debug)] pub struct TcpInlet { socket_address: SocketAddr, - inlet_shared_state: Arc>, + inlet_shared_state: InletSharedState, state: TcpInletState, } @@ -170,7 +196,7 @@ impl TcpInlet { pub fn new_regular( socket_address: SocketAddr, processor_address: Address, - inlet_shared_state: Arc>, + inlet_shared_state: InletSharedState, ) -> Self { Self { socket_address, @@ -183,7 +209,7 @@ impl TcpInlet { pub fn new_privileged( socket_address: SocketAddr, portal_worker_address: Address, - inlet_shared_state: Arc>, + inlet_shared_state: InletSharedState, ) -> Self { Self { socket_address, @@ -212,10 +238,6 @@ impl TcpInlet { } } - fn build_new_full_route(new_route: Route, old_route: &Route) -> Result { - Ok(new_route + old_route.recipient()?.clone()) - } - /// Update the route to the outlet node. /// This is useful if we re-create a secure channel if because, e.g., the other node wasn't /// reachable, or if we want to switch transport, e.g., from relayed to UDP NAT puncture. @@ -223,23 +245,62 @@ impl TcpInlet { /// only newly accepted connections will use the new route. 
/// For privileged Portals old connections can continue work in case the Identifier of the /// Outlet node didn't change - pub fn update_outlet_node_route(&self, ctx: &Context, new_route: Route) -> Result<()> { - let mut inlet_shared_state = self.inlet_shared_state.write().unwrap(); - - let new_route = Self::build_new_full_route(new_route, inlet_shared_state.route())?; + pub fn update_outlet_route_and_unpause( + &self, + ctx: &Context, + route_key: &str, + new_route: Route, + options: TcpInletOptions, + ) -> Result<()> { let next = new_route.next()?.clone(); - inlet_shared_state.update_route(ctx, new_route)?; - + self.inlet_shared_state + .update_route_and_unpause(ctx, route_key, new_route, options)?; self.update_flow_controls(ctx.flow_controls(), next); + Ok(()) + } + /// Add a new route to the Inlet + pub fn add_route( + &self, + ctx: &Context, + route_key: String, + route: Route, + options: TcpInletOptions, + ) -> Result<()> { + let next = route.next()?.clone(); + self.inlet_shared_state.add_route( + route_key, + InletRouteState::create(ctx, route, options.is_paused, options)?, + )?; + self.update_flow_controls(ctx.flow_controls(), next); Ok(()) } - /// Pause TCP Inlet, all incoming TCP streams will be dropped. - pub fn pause(&self) { + /// List all route keys + pub fn list_all_route_keys(&self) -> Vec { + self.inlet_shared_state.list_all_route_keys() + } + + /// Adds an empty route to the Inlet shared state, returns true if the route was added + /// false if the route key already exists + pub fn reserve_route_key(&self, ctx: &Context, route_key: String) -> Result { + let inlet_route_state = + InletRouteState::create(ctx, route![], true, TcpInletOptions::new())?; + Ok(self + .inlet_shared_state + .try_add_route(route_key, inlet_route_state)) + } + + /// Remove route from the Inlet shared state by route_key. + /// No new connections will be accepted on this route. 
+ pub fn remove_route(&self, route_key: &str) { + self.inlet_shared_state.remove_route(route_key); + } + + /// Pause TCP Inlet route, the route will not be used for new incoming TCP streams. + pub fn pause_route(&self, route_key: &str) { debug!(address = %self.socket_address, "pausing inlet"); - let mut inlet_shared_state = self.inlet_shared_state.write().unwrap(); - inlet_shared_state.set_is_paused(true); + self.inlet_shared_state.pause(route_key); } fn update_flow_controls(&self, flow_controls: &FlowControls, next: Address) { @@ -257,21 +318,6 @@ impl TcpInlet { } } - /// Unpause TCP Inlet and update the outlet route. - pub fn unpause(&self, ctx: &Context, new_route: Route) -> Result<()> { - let mut inlet_shared_state = self.inlet_shared_state.write().unwrap(); - - let new_route = Self::build_new_full_route(new_route, inlet_shared_state.route())?; - let next = new_route.next()?.clone(); - - inlet_shared_state.update_route(ctx, new_route)?; - inlet_shared_state.set_is_paused(false); - - self.update_flow_controls(ctx.flow_controls(), next); - - Ok(()) - } - /// Stop the Inlet pub fn stop(&self, ctx: &Context) -> Result<()> { match &self.state { diff --git a/implementations/rust/ockam/ockam_transport_tcp/tests/ebpf_portal.rs b/implementations/rust/ockam/ockam_transport_tcp/tests/ebpf_portal.rs index cdcbe525c6d..db23ecd9044 100644 --- a/implementations/rust/ockam/ockam_transport_tcp/tests/ebpf_portal.rs +++ b/implementations/rust/ockam/ockam_transport_tcp/tests/ebpf_portal.rs @@ -1,20 +1,22 @@ #[cfg(privileged_portals_support)] mod tests { use log::info; + use std::net::SocketAddr; + use std::sync::Arc; use std::time::Duration; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::{TcpListener, TcpStream}; use ockam_core::compat::rand::random; - use ockam_core::{route, Result}; + use ockam_core::{route, AllowAll, Result}; use ockam_node::Context; use ockam_transport_tcp::{TcpInletOptions, TcpOutletOptions, TcpTransport}; const LENGTH: usize = 32; - async fn 
setup(tcp: &TcpTransport) -> Result<(String, TcpListener)> { + async fn setup(context: &Context, tcp: &TcpTransport) -> Result<(String, TcpListener)> { let listener = { - let listener = TcpListener::bind("localhost:0").await.unwrap(); + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let bind_address = listener.local_addr().unwrap().to_string(); info!("Listener address: {}", bind_address); @@ -28,9 +30,20 @@ mod tests { }; let inlet = tcp - .create_privileged_inlet("localhost:0", route!["outlet"], TcpInletOptions::new()) + .create_privileged_inlet( + SocketAddr::from(([127, 0, 0, 1], 0)), + Arc::new(AllowAll), + Arc::new(AllowAll), + ) .await?; + inlet.add_route( + context, + "main".to_string(), + route!["outlet"], + TcpInletOptions::default(), + )?; + let inlet_address = inlet.socket_address().to_string(); info!("Inlet address: {}", inlet_address); @@ -62,7 +75,7 @@ mod tests { let payload1 = generate_binary(); let payload2 = generate_binary(); - let (inlet_addr, listener) = setup(&tcp).await?; + let (inlet_addr, listener) = setup(ctx, &tcp).await?; let handle = tokio::spawn(async move { let (mut stream, _) = listener.accept().await.unwrap(); diff --git a/implementations/rust/ockam/ockam_transport_tcp/tests/portal.rs b/implementations/rust/ockam/ockam_transport_tcp/tests/portal.rs index 3f0e98a4677..8e6915c5ad6 100644 --- a/implementations/rust/ockam/ockam_transport_tcp/tests/portal.rs +++ b/implementations/rust/ockam/ockam_transport_tcp/tests/portal.rs @@ -91,9 +91,6 @@ async fn portal__standard_flow__should_succeed__impl( stream }); - // Wait till the listener is up - tokio::time::sleep(Duration::from_millis(250)).await; - let mut stream = TcpStream::connect(inlet_addr).await.unwrap(); write_binary(&mut stream, payload1).await; read_assert_binary(&mut stream, payload2).await; @@ -134,9 +131,6 @@ async fn portal__reverse_flow__should_succeed__impl( stream }); - // Wait till listener is up - tokio::time::sleep(Duration::from_millis(250)).await; - 
let mut stream = TcpStream::connect(inlet_addr).await.unwrap(); read_assert_binary(&mut stream, payload2).await; write_binary(&mut stream, payload1).await; @@ -206,9 +200,6 @@ async fn portal__tcp_connection__should_succeed__impl( read_assert_binary(&mut stream, payload1).await; }); - // Wait till listener is up - tokio::time::sleep(Duration::from_millis(250)).await; - let mut stream = TcpStream::connect(inlet.socket_address()).await.unwrap(); read_assert_binary(&mut stream, payload2).await; write_binary(&mut stream, payload1).await; @@ -218,8 +209,6 @@ drop(stream); - tokio::time::sleep(Duration::from_millis(250)).await; - Ok(()) } @@ -283,9 +272,6 @@ async fn portal__tcp_connection_with_invalid_message_flow__should_not_succeed__i } }); - // Wait till listener is up - tokio::time::sleep(Duration::from_millis(250)).await; - let mut stream = TcpStream::connect(inlet.socket_address()).await.unwrap(); read_should_timeout(&mut stream).await; @@ -293,13 +279,11 @@ async fn portal__tcp_connection_with_invalid_message_flow__should_not_succeed__i drop(stream); - tokio::time::sleep(Duration::from_millis(250)).await; - Ok(()) } #[allow(non_snake_case)] -#[ockam_macros::test(timeout = 5000)] +#[ockam_macros::test(timeout = 5000)] async fn portal__update_route__should_succeed(ctx: &mut Context) -> Result<()> { portal__update_route__should_succeed__impl(ctx, false).await } @@ -346,14 +330,14 @@ async fn portal__update_route__should_succeed__impl( listener_node.socket_address().to_string(), TcpConnectionOptions::new(), ) - .await - .unwrap(); + .await?; + let options = TcpInletOptions::new().set_skip_handshake(skip_handshake); let inlet = tcp .create_inlet( "127.0.0.1:0", route![node_connection1.clone(), "outlet"], - TcpInletOptions::new().set_skip_handshake(skip_handshake), + options.clone(), ) .await?; @@ -371,16 +355,18 @@ stream }); - // Wait till the listener is 
up - tokio::time::sleep(Duration::from_millis(250)).await; - let mut stream = TcpStream::connect(inlet.socket_address()).await.unwrap(); write_binary(&mut stream, payload1).await; read_assert_binary(&mut stream, payload2).await; node_connection1.stop(ctx)?; - inlet.update_outlet_node_route(ctx, route![node_connection2])?; + inlet.update_outlet_route_and_unpause( + ctx, + "main", + route![node_connection2, "outlet"], + options, + )?; let mut stream = TcpStream::connect(inlet.socket_address()).await.unwrap(); write_binary(&mut stream, payload1).await; @@ -391,7 +377,5 @@ async fn portal__update_route__should_succeed__impl( drop(stream); - tokio::time::sleep(Duration::from_millis(250)).await; - Ok(()) }